text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---|
""" Simple wrapper for a Spark Context to provide loading functionality """
import os
from thunder.utils.common import checkParams, handleFormat, raiseErrorIfPathExists
from thunder.utils.datasets import DataSets
from thunder.utils.params import Params
class ThunderContext():
"""
Wrapper for a SparkContext that provides an entry point for loading and saving.
Also supports creation of example datasets, and loading example
data both locally and from EC2.
"""
def __init__(self, sparkcontext):
self._sc = sparkcontext
self._credentials = None
@classmethod
def start(cls, *args, **kwargs):
"""
Starts a ThunderContext using the same arguments as SparkContext
"""
from pyspark import SparkContext
return ThunderContext(SparkContext(*args, **kwargs))
def addPyFile(self, path):
"""
Adds a .zip or .py or .egg dependency for all tasks to be executed
as part of this context.
Uses the corresponding SparkContext method.
Parameters
----------
path : str
Path to a file as either a local file, file in HDFS, or URI.
"""
self._sc.addPyFile(path)
def stop(self):
"""
Shut down the context
"""
self._sc.stop()
def loadSeries(self, dataPath, nkeys=None, nvalues=None, inputFormat='binary', minPartitions=None,
maxPartitionSize='32mb', confFilename='conf.json', keyType=None, valueType=None, keyPath=None,
varName=None):
"""
Loads a Series object from data stored as binary, text, npy, or mat.
For binary and text, supports single files or multiple files stored on a local file system,
a networked file system (mounted and available on all cluster nodes), Amazon S3, or HDFS.
For local formats (npy and mat), only local file systems are currently supported.
Parameters
----------
dataPath: string
Path to data files or directory, as either a local filesystem path or a URI.
May include a single '*' wildcard in the filename. Examples of valid dataPaths include
"local/directory/*.stack", "s3n:///my-s3-bucket/data/", or "file:///mnt/another/directory/".
nkeys: int, optional (required if `inputFormat` is 'text'), default = None
Number of keys per record (e.g. 3 for (x, y, z) coordinate keys). Must be specified for
text data; can be specified here or in a configuration file for binary data.
nvalues: int, optional (required if `inputFormat` is 'text')
Number of values per record. Must be specified here or in a configuration file for binary data.
inputFormat: {'text', 'binary', 'npy', 'mat'}, optional, default = 'binary'
Format of the data to be read.
minPartitions: int, optional, default = SparkContext.minParallelism
Minimum number of Spark partitions to use, only for text.
maxPartitionSize : int, optional, default = '32mb'
Maximum size of partitions as a Java-style memory string, e.g. '32mb' or '64mb',
indirectly controls the number of Spark partitions, only for binary.
confFilename: string, optional, default 'conf.json'
Path to JSON file with configuration options including 'nkeys', 'nvalues',
'keyType', and 'valueType'. If a file is not found at the given path, then the base
directory in 'dataPath' will be checked. Parameters will override the conf file.
keyType: string or numpy dtype, optional, default = None
Numerical type of keys, will override conf file.
valueType: string or numpy dtype, optional, default = None
Numerical type of values, will override conf file.
keyPath: string, optional, default = None
Path to file with keys when loading from npy or mat.
varName : str, optional, default = None
Variable name to load (for MAT files only)
Returns
-------
data: thunder.rdds.Series
A Series object, wrapping an RDD, with (n-tuples of ints) : (numpy array) pairs
"""
checkParams(inputFormat, ['text', 'binary', 'npy', 'mat'])
from thunder.rdds.fileio.seriesloader import SeriesLoader
loader = SeriesLoader(self._sc, minPartitions=minPartitions)
if inputFormat.lower() == 'binary':
data = loader.fromBinary(dataPath, confFilename=confFilename, nkeys=nkeys, nvalues=nvalues,
keyType=keyType, valueType=valueType, maxPartitionSize=maxPartitionSize)
elif inputFormat.lower() == 'text':
if nkeys is None:
raise Exception('Must provide number of keys per record for loading from text')
data = loader.fromText(dataPath, nkeys=nkeys)
elif inputFormat.lower() == 'npy':
data = loader.fromNpyLocal(dataPath, keyPath)
else:
if varName is None:
raise Exception('Must provide variable name for loading MAT files')
data = loader.fromMatLocal(dataPath, varName, keyPath)
return data
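# Example usage (a minimal, hypothetical sketch -- the app name and data path are
# placeholders and assume binary Series data with a matching conf.json):
#
#   tsc = ThunderContext.start(appName="thunder")
#   series = tsc.loadSeries("s3n:///my-s3-bucket/data/series/", inputFormat='binary')
#   print(series.index)
#   tsc.stop()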
def loadImages(self, dataPath, dims=None, dtype=None, inputFormat='stack', ext=None,
startIdx=None, stopIdx=None, recursive=False, nplanes=None, npartitions=None,
renumber=False, confFilename='conf.json'):
"""
Loads an Images object from data stored as a binary image stack, tif, or png files.
Supports single files or multiple files, stored on a local file system, a networked file system
(mounted and available on all nodes), Amazon S3, or Google Storage.
HDFS is not currently supported for image file data.
Parameters
----------
dataPath: string
Path to data files or directory, as either a local filesystem path or a URI.
May include a single '*' wildcard in the filename. Examples of valid dataPaths include
"local/directory/*.stack", "s3n:///my-s3-bucket/data/", or "file:///mnt/another/directory/".
dims: tuple of positive int, optional (required if inputFormat is 'stack')
Image dimensions. Binary stack data will be interpreted as a multidimensional array
with the given dimensions, and should be stored in column-major order (Fortran or Matlab convention),
where the first dimension changes most rapidly. For 'png' or 'tif' data dimensions
will be read from the image file headers.
inputFormat: str, optional, default = 'stack'
Expected format of the input data: 'stack', 'png', or 'tif'. 'stack' indicates flat binary stacks.
'png' or 'tif' indicate image formats. Pages of a multipage tif file will be extended along
the third dimension. Separate files are interpreted as distinct records, with ordering
given by lexicographic sorting of file names.
ext: string, optional, default = None
File extension, default will be "bin" if inputFormat=="stack", "tif" for inputFormat=='tif',
and 'png' for inputFormat=="png".
dtype: string or numpy dtype, optional, default = 'int16'
Data type of the image files to be loaded, specified as a numpy "dtype" string.
Ignored for 'tif' or 'png' (data will be inferred from image formats).
startIdx: nonnegative int, optional, default = None
Convenience parameters to read only a subset of input files. Uses python slice conventions
(zero-based indexing with exclusive final position). These parameters give the starting
and final index after lexicographic sorting.
stopIdx: nonnegative int, optional, default = None
See startIdx.
recursive: boolean, optional, default = False
If true, will recursively descend directories rooted at dataPath, loading all files
in the tree with an appropriate extension.
nplanes: positive integer, optional, default = None
Subdivide individual image files. Every `nplanes` from each file will be considered a new record.
With nplanes=None (the default), a single file will be considered as representing a single record.
If the number of records per file is not the same across all files, then `renumber` should be set
to True to ensure consistent keys.
npartitions: positive int, optional, default = None
Specify number of partitions for the RDD, if unspecified will use as many partitions
as available cores
renumber: boolean, optional, default = False
Recalculate keys for records after images are loaded. Only necessary if different files contain
different numbers of records (e.g. due to specifying nplanes). See Images.renumber().
confFilename : string, optional, default = 'conf.json'
Name of the configuration file, if one is used to specify parameters for binary stack data
Returns
-------
data: thunder.rdds.Images
An Images object, wrapping an RDD of (int) : (numpy array) pairs
"""
checkParams(inputFormat, ['stack', 'png', 'tif', 'tif-stack'])
from thunder.rdds.fileio.imagesloader import ImagesLoader
loader = ImagesLoader(self._sc)
if npartitions is None:
npartitions = self._sc.defaultParallelism
# Check that startIdx is less than or equal to stopIdx
if startIdx is not None and stopIdx is not None and startIdx > stopIdx:
raise Exception("Error. startIdx {} is larger than stopIdx {}".format(startIdx, stopIdx))
if not ext:
ext = DEFAULT_EXTENSIONS.get(inputFormat.lower(), None)
if inputFormat.lower() == 'stack':
data = loader.fromStack(dataPath, dims=dims, dtype=dtype, ext=ext, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive, nplanes=nplanes, npartitions=npartitions,
confFilename=confFilename)
elif inputFormat.lower().startswith('tif'):
data = loader.fromTif(dataPath, ext=ext, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive,
nplanes=nplanes, npartitions=npartitions)
else:
if nplanes:
raise NotImplementedError("nplanes argument is not supported for png files")
data = loader.fromPng(dataPath, ext=ext, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive,
npartitions=npartitions)
if not renumber:
return data
else:
return data.renumber()
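# Example usage (a hypothetical sketch, assuming a ThunderContext named tsc and a
# directory of single-page tif files, one record per file):
#
#   images = tsc.loadImages("file:///mnt/data/images/", inputFormat='tif')
#   print(images.dims)
#
# For files that each hold several planes, pass e.g. nplanes=2 and renumber=True so
# that keys stay consistent across files.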
def loadSeriesFromArray(self, values, index=None, npartitions=None):
"""
Load Series data from a local array
Parameters
----------
values : list or ndarray
A list of 1d numpy arrays, or a single 2d numpy array
index : array-like, optional, default = None
Index to set for Series object, if None will use linear indices.
npartitions : positive int, optional, default = None
Number of partitions for RDD, if unspecified will use
default parallelism.
"""
from numpy import ndarray, asarray
from thunder.rdds.fileio.seriesloader import SeriesLoader
loader = SeriesLoader(self._sc)
if not npartitions:
npartitions = self._sc.defaultParallelism
if isinstance(values, list):
values = asarray(values)
if isinstance(values, ndarray) and values.ndim > 1:
values = list(values)
data = loader.fromArrays(values, npartitions=npartitions)
if index:
data.index = index
return data
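# Example usage (a minimal sketch, assuming a ThunderContext named tsc; a 2d array
# is treated as one record per row):
#
#   from numpy import arange
#   mat = arange(12).reshape(4, 3)                    # 4 records, 3 values each
#   series = tsc.loadSeriesFromArray(mat, index=[0, 1, 2])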
def loadImagesFromArray(self, values, npartitions=None):
"""
Load Images data from a local array
Parameters
----------
values : list or ndarray
A list of 2d or 3d numpy arrays,
or a single 3d or 4d numpy array
npartitions : positive int, optional, default = None
Number of partitions for RDD, if unspecified will use
default parallelism.
"""
from numpy import ndarray, asarray
from thunder.rdds.fileio.imagesloader import ImagesLoader
loader = ImagesLoader(self._sc)
if isinstance(values, list):
values = asarray(values)
if isinstance(values, ndarray) and values.ndim > 2:
values = list(values)
if not npartitions:
npartitions = self._sc.defaultParallelism
return loader.fromArrays(values, npartitions=npartitions)
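# Example usage (a minimal sketch, assuming a ThunderContext named tsc; a single 3d
# array is treated as a stack of 2d images):
#
#   from numpy import random
#   arr = random.randn(5, 64, 64)                     # 5 images of 64 x 64 pixels
#   images = tsc.loadImagesFromArray(arr, npartitions=2)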
def loadImagesOCP(self, bucketName, resolution, server='ocp.me', startIdx=None, stopIdx=None,
minBound=None, maxBound=None):
"""
Load Images from OCP (Open Connectome Project).
The OCP is a web service for access to EM brain images and other neural image data.
The web-service can be accessed at http://www.openconnectomeproject.org/.
Parameters
----------
bucketName: string
Token name for the project in OCP. This name should exist on the server from which data is loaded.
resolution: nonnegative int
Resolution of the data in OCP
server: string, optional, default = 'ocp.me'
Name of the OCP server with the specified token.
startIdx: nonnegative int, optional, default = None
Convenience parameters to read only a subset of input files. Uses python slice conventions
(zero-based indexing with exclusive final position).
stopIdx: nonnegative int, optional
See startIdx.
minBound, maxBound: tuple of nonnegative int, optional, default = None
X,Y,Z bounds of the data to fetch from OCP. minBound contains the (xMin,yMin,zMin) while
maxBound contains (xMax,yMax,zMax).
Returns
-------
data: thunder.rdds.Images
An Images object, wrapping an RDD of (int) : (numpy array) pairs
"""
from thunder.rdds.fileio.imagesloader import ImagesLoader
loader = ImagesLoader(self._sc)
# Check that startIdx is less than or equal to stopIdx
if startIdx is not None and stopIdx is not None and startIdx > stopIdx:
raise Exception("Error. startIdx {} is larger than stopIdx {}".format(startIdx, stopIdx))
data = loader.fromOCP(bucketName, resolution=resolution, server=server, startIdx=startIdx,
stopIdx=stopIdx, minBound=minBound, maxBound=maxBound)
return data
def loadImagesAsSeries(self, dataPath, dims=None, inputFormat='stack', ext=None, dtype='int16',
blockSize="150M", blockSizeUnits="pixels", startIdx=None, stopIdx=None,
recursive=False, nplanes=None, npartitions=None,
renumber=False, confFilename='conf.json'):
"""
Load Images data as Series data.
Parameters
----------
dataPath: string
Path to data files or directory, as either a local filesystem path or a URI.
May include a single '*' wildcard in the filename. Examples of valid dataPaths include
"local/directory/*.stack", "s3n:///my-s3-bucket/data/", or "file:///mnt/another/directory/".
dims: tuple of positive int, optional (required if inputFormat is 'stack')
Image dimensions. Binary stack data will be interpreted as a multidimensional array
with the given dimensions, and should be stored in column-major order (Fortran or Matlab convention),
where the first dimension changes most rapidly. For 'png' or 'tif' data dimensions
will be read from the image file headers.
inputFormat: str, optional, default = 'stack'
Expected format of the input data: 'stack', 'png', or 'tif'. 'stack' indicates flat binary stacks.
'png' or 'tif' indicate image formats. Pages of a multipage tif file will be extended along
the third dimension. Separate files are interpreted as distinct records, with ordering
given by lexicographic sorting of file names.
ext: string, optional, default = None
File extension, default will be "bin" if inputFormat=="stack", "tif" for inputFormat=='tif',
and 'png' for inputFormat=="png".
dtype: string or numpy dtype. optional, default 'int16'
Data type of the image files to be loaded, specified as a numpy "dtype" string.
Ignored for 'tif' or 'png' (data will be inferred from image formats).
blockSize: string or positive int, optional, default "150M"
Requested size of blocks (e.g. "64M", "512k", "2G"). Can also be a
tuple of int specifying the number of pixels or splits per dimension. Indirectly
controls the number of Spark partitions, with one partition per block.
blockSizeUnits: string, either "pixels" or "splits", default "pixels"
Units for interpreting a tuple passed as blockSize.
startIdx: nonnegative int, optional, default = None
Convenience parameters to read only a subset of input files. Uses python slice conventions
(zero-based indexing with exclusive final position). These parameters give the starting
and final index after lexicographic sorting.
stopIdx: nonnegative int, optional, default = None
See startIdx.
recursive: boolean, optional, default = False
If true, will recursively descend directories rooted at dataPath, loading all files
in the tree with an appropriate extension.
nplanes: positive integer, optional, default = None
Subdivide individual image files. Every `nplanes` from each file will be considered a new record.
With nplanes=None (the default), a single file will be considered as representing a single record.
If the number of records per file is not the same across all files, then `renumber` should be set
to True to ensure consistent keys.
npartitions: positive int, optional, default = None
Specify number of partitions for the RDD, if unspecified will use 1 partition per image.
renumber: boolean, optional, default = False
Recalculate keys for records after images are loaded. Only necessary if different files contain
different numbers of records (e.g. due to specifying nplanes). See Images.renumber().
confFilename : string, optional, default = 'conf.json'
Name of the configuration file, if one is used to specify parameters for binary stack data
Returns
-------
data: thunder.rdds.Series
A Series object, wrapping an RDD, with (n-tuples of ints) : (numpy array) pairs.
Keys will be n-tuples of int, with n given by dimensionality of the images, and correspond
to indexes into the image arrays. Values will have length equal to the number of image files,
with each image contributing one point to the value array; ordering is given by
the lexicographic ordering of image file names.
"""
checkParams(inputFormat, ['stack', 'tif', 'tif-stack'])
if not ext:
ext = DEFAULT_EXTENSIONS.get(inputFormat.lower(), None)
from thunder.rdds.fileio.imagesloader import ImagesLoader
loader = ImagesLoader(self._sc)
if inputFormat.lower() == 'stack':
images = loader.fromStack(dataPath, dims, dtype=dtype, ext=ext, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive, nplanes=nplanes, npartitions=npartitions,
confFilename=confFilename)
else:
# tif / tif stack
images = loader.fromTif(dataPath, ext=ext, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive, nplanes=nplanes, npartitions=npartitions)
if renumber:
images = images.renumber()
return images.toBlocks(blockSize, units=blockSizeUnits).toSeries()
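# Example usage (a hypothetical sketch, assuming a ThunderContext named tsc; the path
# and dimensions are placeholders and assume flat binary stack files):
#
#   series = tsc.loadImagesAsSeries("file:///mnt/data/stacks/", dims=(512, 512, 4),
#                                   inputFormat='stack', blockSize="64M")
#   series.cache()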
def convertImagesToSeries(self, dataPath, outputDirPath, dims=None, inputFormat='stack', ext=None,
dtype='int16', blockSize="150M", blockSizeUnits="pixels", startIdx=None, stopIdx=None,
overwrite=False, recursive=False, nplanes=None, npartitions=None,
renumber=False, confFilename='conf.json'):
"""
Write out Images data as Series data, saved in a flat binary format.
The resulting files may subsequently be read in using ThunderContext.loadSeries().
Loading Series data directly will likely be faster than converting image data
to a Series object through loadImagesAsSeries().
Parameters
----------
dataPath: string
Path to data files or directory, as either a local filesystem path or a URI.
May include a single '*' wildcard in the filename. Examples of valid dataPaths include
"local/directory/*.stack", "s3n:///my-s3-bucket/data/", or "file:///mnt/another/directory/".
outputDirPath: string
Path to directory to write Series file output. May be either a path on the local file system
or a URI-like format, such as "local/directory", "s3n:///my-s3-bucket/data/",
or "file:///mnt/another/directory/". If the directory exists and 'overwrite' is True,
the existing directory and all its contents will be deleted and overwritten.
dims: tuple of positive int, optional (required if inputFormat is 'stack')
Image dimensions. Binary stack data will be interpreted as a multidimensional array
with the given dimensions, and should be stored in column-major order (Fortran or Matlab convention),
where the first dimension changes most rapidly. For 'png' or 'tif' data dimensions
will be read from the image file headers.
inputFormat: str, optional, default = 'stack'
Expected format of the input data: 'stack', 'png', or 'tif'. 'stack' indicates flat binary stacks.
'png' or 'tif' indicate image formats. Pages of a multipage tif file will be extended along
the third dimension. Separate files are interpreted as distinct records, with ordering
given by lexicographic sorting of file names.
ext: string, optional, default = None
File extension, default will be "bin" if inputFormat=="stack", "tif" for inputFormat=='tif',
and 'png' for inputFormat=="png".
dtype: string or numpy dtype. optional, default 'int16'
Data type of the image files to be loaded, specified as a numpy "dtype" string.
Ignored for 'tif' or 'png' (data will be inferred from image formats).
blockSize: string or positive int, optional, default "150M"
Requested size of blocks (e.g. "64M", "512k", "2G"). Can also be a
tuple of int specifying the number of pixels or splits per dimension. Indirectly
controls the number of Spark partitions, with one partition per block.
blockSizeUnits: string, either "pixels" or "splits", default "pixels"
Units for interpreting a tuple passed as blockSize.
startIdx: nonnegative int, optional, default = None
Convenience parameters to read only a subset of input files. Uses python slice conventions
(zero-based indexing with exclusive final position). These parameters give the starting
and final index after lexicographic sorting.
stopIdx: nonnegative int, optional, default = None
See startIdx.
overwrite: boolean, optional, default False
If true, the directory specified by outputDirPath will be deleted (recursively) if it
already exists. (Use with caution.)
recursive: boolean, optional, default = False
If true, will recursively descend directories rooted at dataPath, loading all files
in the tree with an appropriate extension.
nplanes: positive integer, optional, default = None
Subdivide individual image files. Every `nplanes` from each file will be considered a new record.
With nplanes=None (the default), a single file will be considered as representing a single record.
If the number of records per file is not the same across all files, then `renumber` should be set
to True to ensure consistent keys.
npartitions: positive int, optional, default = None
Specify number of partitions for the RDD, if unspecified will use 1 partition per image.
renumber: boolean, optional, default = False
Recalculate keys for records after images are loaded. Only necessary if different files contain
different numbers of records (e.g. due to specifying nplanes). See Images.renumber().
confFilename : string, optional, default = 'conf.json'
Name of the configuration file, if one is used to specify parameters for binary stack data
"""
checkParams(inputFormat, ['stack', 'tif', 'tif-stack'])
if not overwrite:
raiseErrorIfPathExists(outputDirPath, awsCredentialsOverride=self._credentials)
overwrite = True # prevent additional downstream checks for this path
if not ext:
ext = DEFAULT_EXTENSIONS.get(inputFormat.lower(), None)
from thunder.rdds.fileio.imagesloader import ImagesLoader
loader = ImagesLoader(self._sc)
if inputFormat.lower() == 'stack':
images = loader.fromStack(dataPath, dims, ext=ext, dtype=dtype, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive, nplanes=nplanes, npartitions=npartitions,
confFilename=confFilename)
else:
# 'tif' or 'tif-stack'
images = loader.fromTif(dataPath, ext=ext, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive, nplanes=nplanes, npartitions=npartitions)
if renumber:
images = images.renumber()
images.toBlocks(blockSize, units=blockSizeUnits).saveAsBinarySeries(outputDirPath, overwrite=overwrite)
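# Example usage (a hypothetical sketch, assuming a ThunderContext named tsc; both
# paths are placeholders, and overwrite=True deletes any existing output directory):
#
#   tsc.convertImagesToSeries("s3n:///my-s3-bucket/stacks/", "s3n:///my-s3-bucket/series/",
#                             dims=(512, 512, 4), inputFormat='stack', overwrite=True)
#   series = tsc.loadSeries("s3n:///my-s3-bucket/series/")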
def makeExample(self, dataset=None, **opts):
"""
Make an example data set for testing analyses.
Options include 'pca', 'factor', 'kmeans', 'ica', 'sources'
See thunder.utils.datasets for detailed options.
Parameters
----------
dataset : str
Which dataset to generate
Returns
-------
data : RDD of (tuple, array) pairs
Generated dataset
"""
from thunder.utils.datasets import DATASET_MAKERS
if dataset is None:
return sorted(DATASET_MAKERS.keys())
checkParams(dataset, DATASET_MAKERS.keys())
return DataSets.make(self._sc, dataset, **opts)
def loadExample(self, dataset=None):
"""
Load a local example data set for testing analyses.
Some of these data sets are extremely downsampled and should be considered
useful only for testing the API. If called with None,
will return list of available datasets.
Parameters
----------
dataset : str
Which dataset to load
Returns
-------
data : Data object
The dataset as a Thunder data object (e.g. Series or Images)
"""
import atexit
import shutil
import tempfile
from pkg_resources import resource_listdir, resource_filename
DATASETS = {
'iris': 'iris',
'fish-series': 'fish/series',
'fish-images': 'fish/images',
'mouse-series': 'mouse/series',
'mouse-images': 'mouse/images',
'mouse-params': 'mouse/params'
}
if dataset is None:
return sorted(DATASETS.keys())
checkParams(dataset, DATASETS.keys())
if 'ec2' in self._sc.master:
tmpdir = os.path.join('/root/thunder/thunder/utils', 'data', DATASETS[dataset])
else:
tmpdir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, tmpdir)
def copyLocal(target):
files = resource_listdir('thunder.utils.data', target)
for f in files:
path = resource_filename('thunder.utils.data', os.path.join(target, f))
shutil.copy(path, tmpdir)
copyLocal(DATASETS[dataset])
npartitions = self._sc.defaultParallelism
if dataset == "iris":
return self.loadSeries(tmpdir)
elif dataset == "fish-series":
return self.loadSeries(tmpdir).astype('float')
elif dataset == "fish-images":
return self.loadImages(tmpdir, inputFormat="tif", npartitions=npartitions)
elif dataset == "mouse-series":
return self.loadSeries(tmpdir).astype('float')
elif dataset == "mouse-images":
return self.loadImages(tmpdir, npartitions=npartitions)
elif dataset == "mouse-params":
return self.loadParams(os.path.join(tmpdir, 'covariates.json'))
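# Example usage (a minimal sketch, assuming a ThunderContext named tsc; dataset names
# come from the DATASETS dict above):
#
#   print(tsc.loadExample())                          # list the available examples
#   images = tsc.loadExample('fish-images')           # small example Images object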
def loadExampleS3(self, dataset=None):
"""
Load an example data set from S3.
Info on the included datasets can be found at the CodeNeuro data repository
(http://datasets.codeneuro.org/). If called with None, will return
list of available datasets.
Parameters
----------
dataset : str
Which dataset to load
Returns
-------
data : a Data object (usually a Series or Images)
The dataset as one of Thunder's data objects
params : dict
Parameters or metadata for dataset
"""
DATASETS = {
'ahrens.lab/direction.selectivity': 'ahrens.lab/direction.selectivity/1/',
'ahrens.lab/optomotor.response': 'ahrens.lab/optomotor.response/1/',
'svoboda.lab/tactile.navigation': 'svoboda.lab/tactile.navigation/1/'
}
if dataset is None:
return DATASETS.keys()
if 'local' in self._sc.master:
raise Exception("Must be running on an EC2 cluster to load this example data set")
checkParams(dataset, DATASETS.keys())
basePath = 's3n://neuro.datasets/'
dataPath = DATASETS[dataset]
data = self.loadSeries(basePath + dataPath + 'series')
params = self.loadParams(basePath + dataPath + 'params/covariates.json')
return data, params
def loadJSON(self, path):
"""
Generic function for loading JSON from a path, handling local file systems and S3 or GS
Parameters
----------
path : str
Path to a file, can be on a local file system or an S3 or GS bucket
Returns
-------
The deserialized JSON content (typically a dict or list)
"""
import json
from thunder.rdds.fileio.readers import getFileReaderForPath, FileNotFoundError
from thunder.utils.serializable import _decode_dict
reader = getFileReaderForPath(path)(awsCredentialsOverride=self._credentials)
try:
buffer = reader.read(path)
except FileNotFoundError:
raise Exception("Cannot find file %s" % path)
return json.loads(buffer, object_hook=_decode_dict)
def loadParams(self, path):
"""
Load a file with parameters from a local file system or S3 or GS.
Assumes file is JSON with basic types (strings, integers, doubles, lists),
in either a single dict or list of dict-likes, and each dict has at least
a "name" field and a "value" field.
Useful for loading generic meta data, parameters, covariates, etc.
Parameters
----------
path : str
Path to file, can be on a local file system or an S3 or GS bucket
Returns
-------
A dict or list with the parameters
"""
blob = self.loadJSON(path)
return Params(blob)
def loadSources(self, path):
"""
Load a file with sources from a local file system or S3 or GS.
Parameters
----------
path : str
Path to file, can be on a local file system or an S3 or GS bucket
Returns
-------
A SourceModel
See also
--------
SourceExtraction
"""
from thunder import SourceExtraction
blob = self.loadJSON(path)
return SourceExtraction.deserialize(blob)
def export(self, data, filename, outputFormat=None, overwrite=False, varname=None):
"""
Export local array data to a variety of formats.
Can write to a local file system or S3 or GS (destination inferred from the filename scheme).
S3 or GS writing useful for persisting arrays when working in an environment without
accessible local storage.
Parameters
----------
data : array-like
The data to export
filename : str
Output location (path/to/file.ext)
outputFormat : str, optional, default = None
Output format ("npy", "mat", or "txt"), if not provided will
try to infer from file extension.
overwrite : boolean, optional, default = False
Whether to overwrite if directory or file already exists
varname : str, optional, default = None
Variable name for writing "mat" formatted files
"""
from numpy import save, savetxt, asarray
from scipy.io import savemat
from StringIO import StringIO
from thunder.rdds.fileio.writers import getFileWriterForPath
path, file, outputFormat = handleFormat(filename, outputFormat)
checkParams(outputFormat, ["npy", "mat", "txt"])
clazz = getFileWriterForPath(filename)
writer = clazz(path, file, overwrite=overwrite, awsCredentialsOverride=self._credentials)
stream = StringIO()
if outputFormat == "mat":
varname = os.path.splitext(file)[0] if varname is None else varname
savemat(stream, mdict={varname: data}, oned_as='column', do_compression='true')
if outputFormat == "npy":
save(stream, data)
if outputFormat == "txt":
if asarray(data).ndim > 2:
raise Exception("Cannot write data with more than two dimensions to text")
savetxt(stream, data)
stream.seek(0)
writer.writeFile(stream.buf)
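# Example usage (a minimal sketch, assuming a ThunderContext named tsc; the output
# paths are placeholders):
#
#   from numpy import array
#   tsc.export(array([[1, 2], [3, 4]]), "output/result.npy", overwrite=True)
#   tsc.export(array([[1, 2], [3, 4]]), "output/result.mat", varname="result")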
def setAWSCredentials(self, awsAccessKeyId, awsSecretAccessKey):
"""
Manually set AWS access credentials to be used by Thunder.
Provided for hosted cloud environments without filesystem access. If
launching a cluster using the thunder-ec2 script, credentials will be
configured automatically (inside core-site.xml and ~/.boto), so this
method should not need to be called.
Parameters
----------
awsAccessKeyId : string
AWS public key, usually starts with "AKIA"
awsSecretAccessKey : string
AWS private key
"""
from thunder.utils.aws import AWSCredentials
self._credentials = AWSCredentials(awsAccessKeyId, awsSecretAccessKey)
self._credentials.setOnContext(self._sc)
DEFAULT_EXTENSIONS = {
"stack": "bin",
"tif": "tif",
"tif-stack": "tif",
"png": "png",
"mat": "mat",
"npy": "npy",
"txt": "txt"
}
| pearsonlab/thunder | thunder/utils/context.py | Python | apache-2.0 | 36,355 | 0.004924 |
# -*- coding: utf-8 -*-
# Copyright 2017, 2021 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for projectq.cengines._main.py."""
import sys
import weakref
import pytest
from projectq.backends import Simulator
from projectq.cengines import BasicMapperEngine, DummyEngine, LocalOptimizer, _main
from projectq.ops import AllocateQubitGate, DeallocateQubitGate, FlushGate, H
def test_main_engine_init():
ceng1 = DummyEngine()
ceng2 = DummyEngine()
test_backend = DummyEngine()
engine_list = [ceng1, ceng2]
eng = _main.MainEngine(backend=test_backend, engine_list=engine_list)
assert id(eng.next_engine) == id(ceng1)
assert id(eng.main_engine) == id(eng)
assert not eng.is_last_engine
assert id(ceng1.next_engine) == id(ceng2)
assert id(ceng1.main_engine) == id(eng)
assert not ceng1.is_last_engine
assert id(ceng2.next_engine) == id(test_backend)
assert id(ceng2.main_engine) == id(eng)
assert not ceng2.is_last_engine
assert test_backend.is_last_engine
assert id(test_backend.main_engine) == id(eng)
assert not test_backend.next_engine
assert len(engine_list) == 2
def test_main_engine_init_failure():
with pytest.raises(_main.UnsupportedEngineError):
_main.MainEngine(backend=DummyEngine)
with pytest.raises(_main.UnsupportedEngineError):
_main.MainEngine(engine_list=DummyEngine)
with pytest.raises(_main.UnsupportedEngineError):
_main.MainEngine(engine_list=[DummyEngine(), DummyEngine])
with pytest.raises(_main.UnsupportedEngineError):
engine = DummyEngine()
_main.MainEngine(backend=engine, engine_list=[engine])
def test_main_engine_init_defaults():
eng = _main.MainEngine()
eng_list = []
current_engine = eng.next_engine
while not current_engine.is_last_engine:
eng_list.append(current_engine)
current_engine = current_engine.next_engine
assert isinstance(eng_list[-1].next_engine, Simulator)
import projectq.setups.default
default_engines = projectq.setups.default.get_engine_list()
for engine, expected in zip(eng_list, default_engines):
assert type(engine) == type(expected)
def test_main_engine_too_many_compiler_engines():
old = _main._N_ENGINES_THRESHOLD
_main._N_ENGINES_THRESHOLD = 3
_main.MainEngine(backend=DummyEngine(), engine_list=[DummyEngine(), DummyEngine()])
with pytest.raises(ValueError):
_main.MainEngine(backend=DummyEngine(), engine_list=[DummyEngine(), DummyEngine(), DummyEngine()])
_main._N_ENGINES_THRESHOLD = old
def test_main_engine_init_mapper():
class LinearMapper(BasicMapperEngine):
pass
mapper1 = LinearMapper()
mapper2 = BasicMapperEngine()
engine_list1 = [mapper1]
eng1 = _main.MainEngine(engine_list=engine_list1)
assert eng1.mapper == mapper1
engine_list2 = [mapper2]
eng2 = _main.MainEngine(engine_list=engine_list2)
assert eng2.mapper == mapper2
engine_list3 = [mapper1, mapper2]
with pytest.raises(_main.UnsupportedEngineError):
_main.MainEngine(engine_list=engine_list3)
def test_main_engine_del():
# Clear previous exceptions of other tests
sys.last_type = None
del sys.last_type
# need engine which caches commands to test that del calls flush
caching_engine = LocalOptimizer(cache_size=5)
backend = DummyEngine(save_commands=True)
eng = _main.MainEngine(backend=backend, engine_list=[caching_engine])
qubit = eng.allocate_qubit()
H | qubit
assert len(backend.received_commands) == 0
eng.__del__()
# Allocate, H, Deallocate, and Flush Gate
assert len(backend.received_commands) == 4
def test_main_engine_set_and_get_measurement_result():
eng = _main.MainEngine()
qubit0 = eng.allocate_qubit()
qubit1 = eng.allocate_qubit()
with pytest.raises(_main.NotYetMeasuredError):
print(int(qubit0))
eng.set_measurement_result(qubit0[0], True)
eng.set_measurement_result(qubit1[0], False)
assert int(qubit0)
assert not int(qubit1)
def test_main_engine_get_qubit_id():
# Test that ids are not identical
eng = _main.MainEngine()
ids = []
for _ in range(10):
ids.append(eng.get_new_qubit_id())
assert len(set(ids)) == 10
def test_main_engine_flush():
backend = DummyEngine(save_commands=True)
eng = _main.MainEngine(backend=backend, engine_list=[DummyEngine()])
qubit = eng.allocate_qubit()
H | qubit
eng.flush()
assert len(backend.received_commands) == 3
assert backend.received_commands[0].gate == AllocateQubitGate()
assert backend.received_commands[1].gate == H
assert backend.received_commands[2].gate == FlushGate()
eng.flush(deallocate_qubits=True)
assert len(backend.received_commands) == 5
assert backend.received_commands[3].gate == DeallocateQubitGate()
# keep the qubit alive until at least here
assert len(str(qubit)) != 0
def test_main_engine_atexit_no_error():
# Clear previous exceptions of other tests
sys.last_type = None
del sys.last_type
backend = DummyEngine(save_commands=True)
eng = _main.MainEngine(backend=backend, engine_list=[])
qb = eng.allocate_qubit() # noqa: F841
eng._delfun(weakref.ref(eng))
assert len(backend.received_commands) == 3
assert backend.received_commands[0].gate == AllocateQubitGate()
assert backend.received_commands[1].gate == DeallocateQubitGate()
assert backend.received_commands[2].gate == FlushGate()
def test_main_engine_atexit_with_error():
sys.last_type = "Something"
backend = DummyEngine(save_commands=True)
eng = _main.MainEngine(backend=backend, engine_list=[])
qb = eng.allocate_qubit() # noqa: F841
eng._delfun(weakref.ref(eng))
assert len(backend.received_commands) == 1
assert backend.received_commands[0].gate == AllocateQubitGate()
def test_exceptions_are_forwarded():
class ErrorEngine(DummyEngine):
def receive(self, command_list):
raise TypeError
eng = _main.MainEngine(backend=ErrorEngine(), engine_list=[])
with pytest.raises(TypeError):
qb = eng.allocate_qubit() # noqa: F841
eng2 = _main.MainEngine(backend=ErrorEngine(), engine_list=[])
with pytest.raises(TypeError):
qb = eng2.allocate_qubit() # noqa: F841
# NB: avoid throwing exceptions when destroying the MainEngine
eng.next_engine = DummyEngine()
eng.next_engine.is_last_engine = True
eng2.next_engine = DummyEngine()
eng2.next_engine.is_last_engine = True
| ProjectQ-Framework/ProjectQ | projectq/cengines/_main_test.py | Python | apache-2.0 | 7,119 | 0.000421 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class Company(models.Model):
_inherit = 'res.company'
website_logo = fields.Binary("Website logo")
| BT-ojossen/website | website_logo/models/company.py | Python | agpl-3.0 | 1,095 | 0 |
'''Module containing a DensityFunc abstract class, with common probability densities
@since: Jan 10, 2013
@author: kroon
'''
from __future__ import division
import numpy as np
class Gaussian(object):
'''
Class for representing a multi-dimensional Gaussian distribution of dimension d,
given mean and covariance.
The covariance matrix has to be positive definite and non-singular.
Parameters
----------
mean : (d,) ndarray
mean of the distribution
cov : (d,d) ndarray
Covariance matrix.
Methods
-------
f
Returns the value of the density function
logf
Returns the log of the density function
likelihood
Returns the likelihood of the data
loglik
Reurns the log-likelihood of the data
sample
Returns samples drawn from the normal distribution with the given
mean and covariance
Example
-------
>>> from density import Gaussian
>>> # Scalar example
>>> mean = [10.]
>>> cov = [[1.]]
>>> ga = Gaussian(mean,cov)
>>> ga.f([10.])
0.398942280401
>>> x = np.array([[10.,10.,10.]])
>>> ga.likelihood(x)
0.0634936359342
>>> # Multivariate example
>>> mean = [10.0, 10.0]
>>> cov = [[1., 0.], [0., 10.]]
>>> ga = Gaussian(mean,cov)
>>> ga.f(np.array([10., 10.]))
0.050329212104487035
>>> x = np.array([[10.,10.,10.,10.],[10.,10.,10.,10.]])
>>> ga.likelihood(x)
6.4162389091777101e-06
'''
def __init__(self, mean=[0.,0.], cov=[[1.,0.],[0.,1.]]):
mean = np.array(mean); cov = np.array(cov)
d,n = cov.shape
self._dim = d
self._mean = mean.flatten()
self._cov = cov
self._covdet = np.linalg.det(2.0*np.pi*cov)
if self._covdet < 10e-12:
raise ValueError('The covariance matrix is singular.')
def f(self, x):
'''
Calculate the value of the normal distributions at x
Parameters
----------
x : (d,) ndarray
Evaluate a single d-dimensional samples x
Returns
-------
val : scalar
The value of the normal distribution at x.
'''
return np.exp(self.logf(x))
def logf(self, x):
'''
Calculate the log-density at x
Parameters
----------
x : (d,) ndarray
Evaluate the log-normal distribution at a single d-dimensional
sample x
Returns
-------
val : scalar
The value of the log of the normal distribution at x.
'''
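# The value computed below is the multivariate normal log-density
#     log N(x; mean, cov) = -0.5*log(det(2*pi*cov)) - 0.5*(x - mean)' inv(cov) (x - mean),
# with the quadratic (Mahalanobis) term evaluated through a linear solve rather than
# an explicit matrix inverse.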
#x = x[:,np.newaxis]
trans = x - self._mean
mal = -trans.dot(np.linalg.solve(self._cov,trans))/2.
return -0.5*np.log(self._covdet) + mal
def likelihood(self, x):
'''
Calculates the likelihood of the data set x for the normal
distribution.
Parameters
----------
x : (d,n) ndarray
Calculate the likelihood of n, d-dimensional samples
Returns
-------
val : scalar
The likelihood value
'''
return np.exp(self.loglik(x))
def loglik(self, x):
'''
Calculates the log-likelihood of the data set x for the normal
distribution.
Parameters
----------
x : (d,n) ndarray
Calculate the likelihood of n, d-dimensional samples
Returns
-------
val : scalar
The log-likelihood value
'''
return np.sum(np.apply_along_axis(self.logf, 0, x))
def sample(self, n=1):
'''
Calculates n independent points sampled from the normal distribution
Parameters
----------
n : int
The number of samples
Returns
-------
samples : (d,n) ndarray
n, d-dimensional samples
'''
return np.random.multivariate_normal(self._mean, self._cov, n).T
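# Example usage (a minimal sketch mirroring the class docstring):
#
#   ga = Gaussian(mean=[0., 0.], cov=[[1., 0.], [0., 1.]])
#   draws = ga.sample(100)                 # (2, 100) ndarray of samples
#   print(ga.loglik(draws))                # log-likelihood of the sampled points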
| scjrobertson/xRange | kalman/gaussian.py | Python | gpl-3.0 | 4,271 | 0.012409 |
""" A pair of numbers is considered to be cool if their product is divisible by their sum. More formally,
a pair (i, j) is cool if and only if (i * j) % (i + j) = 0.
Given two lists a and b, find cool pairs with the first number in the pair from a, and the second one from b.
Return the number of different sums of elements in such pairs.
Example
For a = [4, 5, 6, 7, 8] and b = [8, 9, 10, 11, 12], the output should be
coolPairs(a, b) = 2.
There are three cool pairs that can be formed from these arrays: (4, 12), (6, 12) and (8, 8). Their respective
sums are 16, 18 and 16, which means that there are just 2 different sums: 16 and 18. Thus, the output should be
equal to 2.
"""
def coolPairs(a, b):
uniqueSums = {i+j for i in a for j in b if i*j%(i+j) == 0} # CodeFights asks to change this line only
return len(uniqueSums)
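# Example from the problem statement: for a = [4, 5, 6, 7, 8] and b = [8, 9, 10, 11, 12]
# the cool pairs are (4, 12), (6, 12) and (8, 8), with sums {16, 18}:
#
#   print(coolPairs([4, 5, 6, 7, 8], [8, 9, 10, 11, 12]))   # 2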
| ntthuy11/CodeFights | Arcade/04_Python/05_ComplexityOfComprehension/coolPairs.py | Python | mit | 853 | 0.014068 |
"""Operator classes for eval.
"""
from datetime import datetime
from distutils.version import LooseVersion
from functools import partial
import operator as op
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.compat import PY3, string_types, text_type
from pandas.core.dtypes.common import is_list_like, is_scalar
from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.computation.common import _ensure_decoded, _result_type_many
from pandas.core.computation.scope import _DEFAULT_GLOBALS
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs', 'log10',
'floor', 'ceil'
)
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
"""NameError subclass for local variables."""
def __init__(self, name, is_local):
if is_local:
msg = 'local variable {0!r} is not defined'
else:
msg = 'name {0!r} is not defined'
super(UndefinedVariableError, self).__init__(msg.format(name))
class Term(StringMixin):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
self._name = name
self.env = env
self.side = side
tname = text_type(name)
self.is_local = (tname.startswith(_LOCAL_TAG) or
tname in _DEFAULT_GLOBALS)
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self):
return self.name.replace(_LOCAL_TAG, '')
def __unicode__(self):
return pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, 'ndim') and res.ndim > 2:
raise NotImplementedError("N-dimensional objects, where N > 2,"
" are not supported with eval")
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, string_types):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def is_scalar(self):
return is_scalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self):
return pprint_thing('{0}(name={1!r}, type={2})'
''.format(self.__class__.__name__, self.name,
self.type))
@property
def is_datetime(self):
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def ndim(self):
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
def __unicode__(self):
# in python 2 str() of float
# can truncate shorter than repr()
return repr(self.name)
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
"""Hold an operator of arbitrary arity
"""
def __init__(self, op, operands, *args, **kwargs):
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = kwargs.get('encoding', None)
def __iter__(self):
return iter(self.operands)
def __unicode__(self):
"""Print a generic n-ary operator and its operands using infix
notation"""
# recurse over the operands
parened = ('({0})'.format(pprint_thing(opr))
for opr in self.operands)
return pprint_thing(' {0} '.format(self.op).join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (_cmp_ops_syms + _bool_ops_syms):
return np.bool_
return _result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self):
types = self.operand_types
obj_dtype_set = frozenset([np.dtype('object')])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def is_scalar(self):
return all(operand.is_scalar for operand in self.operands)
@property
def is_datetime(self):
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
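# Illustrative note (not part of the original module): with a pandas Series
# s = pd.Series([1, 2, 3]), _in(s, [1, 3]) returns the boolean Series
# [True, False, True] and _not_in(s, [1, 3]) returns its negation; plain Python
# objects without an .isin method fall back to the `in` operator.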
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
_special_case_arith_ops_funcs))
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
def _cast_inplace(terms, acceptable_dtypes, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
.. versionadded:: 0.19.0
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj):
return isinstance(obj, Term)
class BinOp(Op):
"""Hold a binary operator and its operands
Parameters
----------
op : str
left : Term or Op
right : Term or Op
"""
def __init__(self, op, lhs, rhs, **kwargs):
super(BinOp, self).__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError('Invalid binary operator {0!r}, valid'
' operators are {1}'.format(op, keys))
def __call__(self, env):
"""Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# handle truediv
if self.op == '/' and env.scope['truediv']:
self.func = op.truediv
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine, parser, term_type, eval_in_python):
"""Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == 'python':
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
right = self.rhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
from pandas.core.computation.eval import eval
res = eval(self, local_dict=env, engine=engine,
parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self):
"""Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
if ((self.lhs.is_scalar or self.rhs.is_scalar) and
self.op in _bool_ops_dict and
(not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
issubclass(self.lhs.return_type, (bool, np.bool_))))):
raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
return issubclass(np.dtype(dtype).type, np.number)
class Div(BinOp):
"""Div operator to special case casting.
Parameters
----------
lhs, rhs : Term or Op
The Terms or Ops in the ``/`` expression.
truediv : bool
Whether or not to use true division. With Python 3 this happens
regardless of the value of ``truediv``.
"""
def __init__(self, lhs, rhs, truediv, *args, **kwargs):
super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError("unsupported operand type(s) for {0}:"
" '{1}' and '{2}'".format(self.op,
lhs.return_type,
rhs.return_type))
if truediv or PY3:
# do not upcast float32s to float64 un-necessarily
acceptable_dtypes = [np.float32, np.float_]
_cast_inplace(com.flatten(self), acceptable_dtypes, np.float_)
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
"""Hold a unary operator and its operands
Parameters
----------
op : str
The token used to represent the operator.
operand : Term or Op
The Term or Op operand to the operator.
Raises
------
ValueError
* If no function associated with the passed operator token is found.
"""
def __init__(self, op, operand):
super(UnaryOp, self).__init__(op, (operand,))
self.operand = operand
try:
self.func = _unary_ops_dict[op]
except KeyError:
raise ValueError('Invalid unary operator {0!r}, valid operators '
'are {1}'.format(op, _unary_ops_syms))
def __call__(self, env):
operand = self.operand(env)
return self.func(operand)
def __unicode__(self):
return pprint_thing('{0}({1})'.format(self.op, self.operand))
@property
def return_type(self):
operand = self.operand
if operand.return_type == np.dtype('bool'):
return np.dtype('bool')
if (isinstance(operand, Op) and
(operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
return np.dtype('bool')
return np.dtype('int')
class MathCall(Op):
def __init__(self, func, args):
super(MathCall, self).__init__(func.name, args)
self.func = func
def __call__(self, env):
operands = [op(env) for op in self.operands]
with np.errstate(all='ignore'):
return self.func.func(*operands)
def __unicode__(self):
operands = map(str, self.operands)
return pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
class FuncNode(object):
def __init__(self, name):
from pandas.core.computation.check import (_NUMEXPR_INSTALLED,
_NUMEXPR_VERSION)
if name not in _mathops or (
_NUMEXPR_INSTALLED and
_NUMEXPR_VERSION < LooseVersion('2.6.9') and
name in ('floor', 'ceil')
):
raise ValueError(
"\"{0}\" is not a supported function".format(name))
self.name = name
self.func = getattr(np, name)
def __call__(self, *args):
return MathCall(self, args)
| MJuddBooth/pandas | pandas/core/computation/ops.py | Python | bsd-3-clause | 16,387 | 0 |
import sys, os, math
import time
import numpy as np
from pandas.io.parsers import read_csv
from sklearn.decomposition import PCA
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn import metrics
import sklearn.svm as svm
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
cut_pt = 1
print ("Reading the file...")
input_res = read_csv(os.path.expanduser("input/train.csv"), nrows=3000) # load pandas dataframe
input_res = input_res.as_matrix()
shape = input_res.shape
number_of_rows = shape[0]
number_of_columns = shape[1]
number_of_fv = number_of_columns - cut_pt
print ("Number of rows: %d (document)" % number_of_rows)
print ("Number of columns: %d (feature vector(preprocessed) + topics class labels(preprocessed))" % number_of_columns)
print ("Number of class_labels: %d" % number_of_fv)
# initialize training x and y's
x = input_res[:,cut_pt:number_of_columns]
y = input_res[:,0:cut_pt].transpose().ravel()
x = x / 255.
data = x[0]
print (data)
print (data.shape[0])
img = data.reshape(28, 28)
img = img.astype(np.float32)
plt.imshow(img, cmap="gray")
plt.show()
| flyingpoops/kaggle-digit-recognizer-team-learning | plot.py | Python | apache-2.0 | 1,298 | 0.008475 |
from .connection import Connection
from .group import Group, SerialGroup, ThreadingGroup
from .tasks import task
| github/codeql | python/ql/test/query-tests/Security/lib/fabric/__init__.py | Python | mit | 113 | 0 |
"""
UNFINISHED
Fixer for turning multiple lines like these:
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
into a single line like this:
from __future__ import (absolute_import, division, print_function)
This helps with testing of ``futurize``.
"""
from lib2to3 import fixer_base
from libfuturize.fixer_util import future_import
class FixOrderFutureImports(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "file_input"
run_order = 10
# def match(self, node):
# """
# Match only once per file
# """
# if hasattr(node, 'type') and node.type == syms.file_input:
# return True
# return False
def transform(self, node, results):
# TODO # write me
pass
| thonkify/thonkify | src/lib/libfuturize/fixes/fix_order___future__imports.py | Python | mit | 830 | 0 |
import axi2s_c
import sys
uut = axi2s_c.axi2s_c()
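# "uut" = unit under test; read the target named by the first command-line argument.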
uut.read(sys.argv[1])
| ruishihan/R7-with-notes | src/python/ioread.py | Python | apache-2.0 | 82 | 0.012195 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import utils
import xml_utils
from parser_exception import ParserException
class Event(object):
def __init__(self, node):
""" Constructor
Keyword arguments:
node -- XML node defining this event
"""
self.node = node
self.name = node.get('name')
utils.check_name(self.name)
self.id = None
self.description = None
self.rate = None
self.type = None
def evaluate(self, tree):
if self.node is None:
return
self.id = xml_utils.get_identifier(self.node)
self.description = xml_utils.get_description(self.node)
self.rate = self.node.get('rate')
type = self.node.get('type')
if type is None:
self.type = None
else:
try:
self.type = tree.types[type]
except KeyError as e:
raise ParserException("Type '%s' is not defined. Used by Event '%s')" % (type, self.name))
self.node = None
def update(self, other):
""" Update events with the values from another event
		Events are guaranteed to be unique within the evaluated tree. Therefore
an update demand can only be issued for the same events, one declared
in the super-class and the other in the sub-class.
The assert statement checks this, nothing else needs to be done.
"""
assert id(self) == id(other)
def __cmp__(self, other):
return cmp(self.id, other.id) or cmp(self.name, other.name)
def __str__(self):
if self.type is None:
type = None
else:
type = self.type.name
return "[%02x] %s : %s" % (self.id, self.name, type)
| dergraaf/xpcc | tools/system_design/xmlparser/event.py | Python | bsd-3-clause | 1,538 | 0.048114 |
"""
Utility function to facilitate testing.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import re
import operator
import warnings
from functools import partial
import shutil
import contextlib
from tempfile import mkdtemp, mkstemp
from .nosetester import import_nose
from numpy.core import float32, empty, arange, array_repr, ndarray
from numpy.lib.utils import deprecate
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
__all__ = ['assert_equal', 'assert_almost_equal', 'assert_approx_equal',
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir']
class KnownFailureException(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
KnownFailureTest = KnownFailureException # backwards compat
# nose.SkipTest is unittest.case.SkipTest
# import it into the namespace, so that it's available as np.testing.SkipTest
try:
from unittest.case import SkipTest
except ImportError:
# on py2.6 unittest.case is not available. Ask nose for a replacement.
SkipTest = import_nose().SkipTest
verbose = 0
def assert_(val, msg=''):
"""
Assert that works in release mode.
Accepts callable msg to allow deferring evaluation until failure.
The Python built-in ``assert`` does not work when executing code in
optimized mode (the ``-O`` flag) - no byte-code is generated for it.
For documentation on usage, refer to the Python documentation.
"""
if not val:
try:
smsg = msg()
except TypeError:
smsg = msg
raise AssertionError(smsg)
def gisnan(x):
"""like isnan, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isnan and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isnan
st = isnan(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isnan not supported for this type")
return st
def gisfinite(x):
"""like isfinite, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isfinite and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isfinite, errstate
with errstate(invalid='ignore'):
st = isfinite(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isfinite not supported for this type")
return st
def gisinf(x):
"""like isinf, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isinf and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isinf, errstate
with errstate(invalid='ignore'):
st = isinf(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isinf not supported for this type")
return st
@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
"Use numpy.random.rand instead.")
def rand(*args):
"""Returns an array of random numbers with the given shape.
This only uses the standard library, so it is useful for testing purposes.
"""
import random
from numpy.core import zeros, float64
results = zeros(args, float64)
f = results.flat
for i in range(len(f)):
f[i] = random.random()
return results
if os.name == 'nt':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance=None,
inum=-1, format=None, machine=None):
# NOTE: Many counters require 2 samples to give accurate results,
# including "% Processor Time" (as by definition, at any instant, a
# thread's CPU usage is either 0 or 100). To read counters like this,
# you should copy this function, but keep the counter open, and call
# CollectQueryData() each time you need to know.
# See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
# My older explanation for this was that the "AddCounter" process forced
# the CPU to 100%, but the above makes more sense :)
import win32pdh
if format is None:
format = win32pdh.PDH_FMT_LONG
path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
hq = win32pdh.OpenQuery()
try:
hc = win32pdh.AddCounter(hq, path)
try:
win32pdh.CollectQueryData(hq)
type, val = win32pdh.GetFormattedCounterValue(hc, format)
return val
finally:
win32pdh.RemoveCounter(hc)
finally:
win32pdh.CloseQuery(hq)
def memusage(processName="python", instance=0):
# from win32pdhutil, part of the win32all package
import win32pdh
return GetPerformanceAttributes("Process", "Virtual Bytes",
processName, instance,
win32pdh.PDH_FMT_LONG, None)
elif sys.platform[:5] == 'linux':
def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):
"""
Return virtual memory size in bytes of the running python.
"""
try:
f = open(_proc_pid_stat, 'r')
l = f.readline().split(' ')
f.close()
return int(l[22])
except:
return
else:
def memusage():
"""
Return memory usage of running python. [Not implemented]
"""
raise NotImplementedError
if sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),
_load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
try:
f = open(_proc_pid_stat, 'r')
l = f.readline().split(' ')
f.close()
return int(l[13])
except:
return int(100*(time.time()-_load_time[0]))
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
return int(100*(time.time()-_load_time[0]))
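# build_err_msg is the shared formatter used by the assert_* helpers below: it
# joins a header, any user-supplied err_msg and (when verbose) a truncated repr
# of each compared object into one multi-line message.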
def build_err_msg(arrays, err_msg, header='Items are not equal:',
verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
msg = ['\n' + header]
if err_msg:
if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
msg = [msg[0] + ' ' + err_msg]
else:
msg.append(err_msg)
if verbose:
for i, a in enumerate(arrays):
if isinstance(a, ndarray):
# precision argument is only needed if the objects are ndarrays
r_func = partial(array_repr, precision=precision)
else:
r_func = repr
try:
r = r_func(a)
except:
r = '[repr failed]'
if r.count('\n') > 3:
r = '\n'.join(r.splitlines()[:3])
r += '...'
msg.append(' %s: %s' % (names[i], r))
return '\n'.join(msg)
def assert_equal(actual,desired,err_msg='',verbose=True):
"""
Raises an AssertionError if two objects are not equal.
Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
check that all elements of these objects are equal. An exception is raised
at the first conflicting values.
Parameters
----------
actual : array_like
The object to check.
desired : array_like
The expected object.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal.
Examples
--------
>>> np.testing.assert_equal([4,5], [4,6])
...
<type 'exceptions.AssertionError'>:
Items are not equal:
item=1
ACTUAL: 5
DESIRED: 6
"""
__tracebackhide__ = True # Hide traceback for py.test
if isinstance(desired, dict):
if not isinstance(actual, dict):
raise AssertionError(repr(type(actual)))
assert_equal(len(actual), len(desired), err_msg, verbose)
for k, i in desired.items():
if k not in actual:
raise AssertionError(repr(k))
assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose)
return
if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
assert_equal(len(actual), len(desired), err_msg, verbose)
for k in range(len(desired)):
assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose)
return
from numpy.core import ndarray, isscalar, signbit
from numpy.lib import iscomplexobj, real, imag
if isinstance(actual, ndarray) or isinstance(desired, ndarray):
return assert_array_equal(actual, desired, err_msg, verbose)
msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except ValueError:
usecomplex = False
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_equal(actualr, desiredr)
assert_equal(actuali, desiredi)
except AssertionError:
raise AssertionError(msg)
# Inf/nan/negative zero handling
try:
# isscalar test to check cases such as [np.nan] != np.nan
if isscalar(desired) != isscalar(actual):
raise AssertionError(msg)
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
isdesnan = gisnan(desired)
isactnan = gisnan(actual)
if isdesnan or isactnan:
if not (isdesnan and isactnan):
raise AssertionError(msg)
else:
if not desired == actual:
raise AssertionError(msg)
return
elif desired == 0 and actual == 0:
if not signbit(desired) == signbit(actual):
raise AssertionError(msg)
# If TypeError or ValueError raised while using isnan and co, just handle
# as before
except (TypeError, ValueError, NotImplementedError):
pass
# Explicitly use __eq__ for comparison, ticket #2552
if not (desired == actual):
raise AssertionError(msg)
def print_assert_equal(test_string, actual, desired):
"""
Test if two objects are equal, and print an error message if test fails.
The test is performed with ``actual == desired``.
Parameters
----------
test_string : str
The message supplied to AssertionError.
actual : object
The object to test for equality against `desired`.
desired : object
The expected result.
Examples
--------
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
Traceback (most recent call last):
...
AssertionError: Test XYZ of func xyz failed
ACTUAL:
[0, 1]
DESIRED:
[0, 2]
"""
__tracebackhide__ = True # Hide traceback for py.test
import pprint
if not (actual == desired):
msg = StringIO()
msg.write(test_string)
msg.write(' failed\nACTUAL: \n')
pprint.pprint(actual, msg)
msg.write('DESIRED: \n')
pprint.pprint(desired, msg)
raise AssertionError(msg.getvalue())
def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
"""
Raises an AssertionError if two items are not equal up to desired
precision.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
The test is equivalent to ``abs(desired-actual) < 0.5 * 10**(-decimal)``.
Given two objects (numbers or ndarrays), check that all elements of these
objects are almost equal. An exception is raised at conflicting values.
For ndarrays this delegates to assert_array_almost_equal
Parameters
----------
actual : array_like
The object to check.
desired : array_like
The expected object.
decimal : int, optional
Desired precision, default is 7.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
>>> import numpy.testing as npt
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
...
<type 'exceptions.AssertionError'>:
Items are not equal:
ACTUAL: 2.3333333333333002
DESIRED: 2.3333333399999998
>>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
... np.array([1.0,2.33333334]), decimal=9)
...
<type 'exceptions.AssertionError'>:
Arrays are not almost equal
<BLANKLINE>
(mismatch 50.0%)
x: array([ 1. , 2.33333333])
y: array([ 1. , 2.33333334])
"""
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import ndarray
from numpy.lib import iscomplexobj, real, imag
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except ValueError:
usecomplex = False
def _build_err_msg():
header = ('Arrays are not almost equal to %d decimals' % decimal)
return build_err_msg([actual, desired], err_msg, verbose=verbose,
header=header)
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_almost_equal(actualr, desiredr, decimal=decimal)
assert_almost_equal(actuali, desiredi, decimal=decimal)
except AssertionError:
raise AssertionError(_build_err_msg())
if isinstance(actual, (ndarray, tuple, list)) \
or isinstance(desired, (ndarray, tuple, list)):
return assert_array_almost_equal(actual, desired, decimal, err_msg)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
if gisnan(desired) or gisnan(actual):
if not (gisnan(desired) and gisnan(actual)):
raise AssertionError(_build_err_msg())
else:
if not desired == actual:
raise AssertionError(_build_err_msg())
return
except (NotImplementedError, TypeError):
pass
if round(abs(desired - actual), decimal) != 0:
raise AssertionError(_build_err_msg())
def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
"""
Raises an AssertionError if two items are not equal up to significant
digits.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
Given two numbers, check that they are approximately equal.
Approximately equal is defined as the number of significant digits
that agree.
Parameters
----------
actual : scalar
The object to check.
desired : scalar
The expected object.
significant : int, optional
Desired precision, default is 7.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
>>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
significant=8)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
significant=8)
...
<type 'exceptions.AssertionError'>:
Items are not equal to 8 significant digits:
ACTUAL: 1.234567e-021
DESIRED: 1.2345672000000001e-021
the evaluated condition that raises the exception is
>>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
True
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
(actual, desired) = map(float, (actual, desired))
if desired == actual:
return
    # Normalize the numbers to be in range (-10.0, 10.0)
# scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
with np.errstate(invalid='ignore'):
scale = 0.5*(np.abs(desired) + np.abs(actual))
scale = np.power(10, np.floor(np.log10(scale)))
try:
sc_desired = desired/scale
except ZeroDivisionError:
sc_desired = 0.0
try:
sc_actual = actual/scale
except ZeroDivisionError:
sc_actual = 0.0
msg = build_err_msg([actual, desired], err_msg,
header='Items are not equal to %d significant digits:' %
significant,
verbose=verbose)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
if gisnan(desired) or gisnan(actual):
if not (gisnan(desired) and gisnan(actual)):
raise AssertionError(msg)
else:
if not desired == actual:
raise AssertionError(msg)
return
except (TypeError, NotImplementedError):
pass
if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
raise AssertionError(msg)
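# assert_array_compare is the common workhorse behind assert_array_equal,
# assert_array_almost_equal, assert_array_less and assert_allclose: it checks
# shapes, verifies that nan/inf occur at the same positions, applies the given
# element-wise comparison and reports the "(mismatch ...%)" percentage on failure.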
def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
header='', precision=6):
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import array, isnan, isinf, any, all, inf
x = array(x, copy=False, subok=True)
y = array(y, copy=False, subok=True)
def safe_comparison(*args, **kwargs):
# There are a number of cases where comparing two arrays hits special
# cases in array_richcompare, specifically around strings and void
# dtypes. Basically, we just can't do comparisons involving these
# types, unless both arrays have exactly the *same* type. So
# e.g. you can apply == to two string arrays, or two arrays with
# identical structured dtypes. But if you compare a non-string array
# to a string array, or two arrays with non-identical structured
# dtypes, or anything like that, then internally stuff blows up.
# Currently, when things blow up, we just return a scalar False or
# True. But we also emit a DeprecationWarning, b/c eventually we
# should raise an error here. (Ideally we might even make this work
# properly, but since that will require rewriting a bunch of how
# ufuncs work then we are not counting on that.)
#
# The point of this little function is to let the DeprecationWarning
# pass (or maybe eventually catch the errors and return False, I
# dunno, that's a little trickier and we can figure that out when the
# time comes).
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
return comparison(*args, **kwargs)
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPefdgFDG'
def chk_same_position(x_id, y_id, hasval='nan'):
"""Handling nan/inf: check that x and y have the nan/inf at the same
locations."""
try:
assert_array_equal(x_id, y_id)
except AssertionError:
msg = build_err_msg([x, y],
err_msg + '\nx and y %s location mismatch:'
% (hasval), verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
try:
cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ '\n(shapes %s, %s mismatch)' % (x.shape,
y.shape),
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
if not cond:
raise AssertionError(msg)
if isnumber(x) and isnumber(y):
x_isnan, y_isnan = isnan(x), isnan(y)
x_isinf, y_isinf = isinf(x), isinf(y)
# Validate that the special values are in the same place
if any(x_isnan) or any(y_isnan):
chk_same_position(x_isnan, y_isnan, hasval='nan')
if any(x_isinf) or any(y_isinf):
# Check +inf and -inf separately, since they are different
chk_same_position(x == +inf, y == +inf, hasval='+inf')
chk_same_position(x == -inf, y == -inf, hasval='-inf')
# Combine all the special values
x_id, y_id = x_isnan, y_isnan
x_id |= x_isinf
y_id |= y_isinf
# Only do the comparison if actual values are left
if all(x_id):
return
if any(x_id):
val = safe_comparison(x[~x_id], y[~y_id])
else:
val = safe_comparison(x, y)
else:
val = safe_comparison(x, y)
if isinstance(val, bool):
cond = val
reduced = [0]
else:
reduced = val.ravel()
cond = reduced.all()
reduced = reduced.tolist()
if not cond:
match = 100-100.0*reduced.count(1)/len(reduced)
msg = build_err_msg([x, y],
err_msg
+ '\n(mismatch %s%%)' % (match,),
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
if not cond:
raise AssertionError(msg)
except ValueError:
import traceback
efmt = traceback.format_exc()
header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise ValueError(msg)
def assert_array_equal(x, y, err_msg='', verbose=True):
"""
Raises an AssertionError if two array_like objects are not equal.
Given two array_like objects, check that the shape is equal and all
elements of these objects are equal. An exception is raised at
shape mismatch or conflicting values. In contrast to the standard usage
in numpy, NaNs are compared like numbers, no assertion is raised if
both objects have NaNs in the same positions.
The usual caution for verifying equality with floating point numbers is
advised.
Parameters
----------
x : array_like
The actual object to check.
y : array_like
The desired, expected object.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired objects are not equal.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
The first assert does not raise an exception:
>>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
... [np.exp(0),2.33333, np.nan])
    Assert fails with numerical imprecision with floats:
>>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan])
...
<type 'exceptions.ValueError'>:
AssertionError:
Arrays are not equal
<BLANKLINE>
(mismatch 50.0%)
x: array([ 1. , 3.14159265, NaN])
y: array([ 1. , 3.14159265, NaN])
Use `assert_allclose` or one of the nulp (number of floating point values)
functions for these cases instead:
>>> np.testing.assert_allclose([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan],
... rtol=1e-10, atol=0)
"""
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
verbose=verbose, header='Arrays are not equal')
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
"""
Raises an AssertionError if two objects are not equal up to desired
precision.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
The test verifies identical shapes and verifies values with
``abs(desired-actual) < 0.5 * 10**(-decimal)``.
Given two array_like objects, check that the shape is equal and all
elements of these objects are almost equal. An exception is raised at
shape mismatch or conflicting values. In contrast to the standard usage
in numpy, NaNs are compared like numbers, no assertion is raised if
both objects have NaNs in the same positions.
Parameters
----------
x : array_like
The actual object to check.
y : array_like
The desired, expected object.
decimal : int, optional
Desired precision, default is 6.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
the first assert does not raise an exception
>>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
[1.0,2.333,np.nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33339,np.nan], decimal=5)
...
<type 'exceptions.AssertionError'>:
AssertionError:
Arrays are not almost equal
<BLANKLINE>
(mismatch 50.0%)
x: array([ 1. , 2.33333, NaN])
y: array([ 1. , 2.33339, NaN])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33333, 5], decimal=5)
<type 'exceptions.ValueError'>:
ValueError:
Arrays are not almost equal
x: array([ 1. , 2.33333, NaN])
y: array([ 1. , 2.33333, 5. ])
"""
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import around, number, float_, result_type, array
from numpy.core.numerictypes import issubdtype
from numpy.core.fromnumeric import any as npany
def compare(x, y):
try:
if npany(gisinf(x)) or npany( gisinf(y)):
xinfid = gisinf(x)
yinfid = gisinf(y)
if not xinfid == yinfid:
return False
# if one item, x and y is +- inf
if x.size == y.size == 1:
return x == y
x = x[~xinfid]
y = y[~yinfid]
except (TypeError, NotImplementedError):
pass
# make sure y is an inexact type to avoid abs(MIN_INT); will cause
# casting of x later.
dtype = result_type(y, 1.)
y = array(y, dtype=dtype, copy=False, subok=True)
z = abs(x-y)
if not issubdtype(z.dtype, number):
z = z.astype(float_) # handle object arrays
return around(z, decimal) <= 10.0**(-decimal)
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header=('Arrays are not almost equal to %d decimals' % decimal),
precision=decimal)
def assert_array_less(x, y, err_msg='', verbose=True):
"""
Raises an AssertionError if two array_like objects are not ordered by less
than.
Given two array_like objects, check that the shape is equal and all
elements of the first object are strictly smaller than those of the
second object. An exception is raised at shape mismatch or incorrectly
ordered values. Shape mismatch does not raise if an object has zero
dimension. In contrast to the standard usage in numpy, NaNs are
compared, no assertion is raised if both objects have NaNs in the same
positions.
Parameters
----------
x : array_like
The smaller object to check.
y : array_like
The larger object to compare.
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired objects are not equal.
See Also
--------
assert_array_equal: tests objects for equality
assert_array_almost_equal: test objects for equality up to precision
Examples
--------
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
...
<type 'exceptions.ValueError'>:
Arrays are not less-ordered
(mismatch 50.0%)
x: array([ 1., 1., NaN])
y: array([ 1., 2., NaN])
>>> np.testing.assert_array_less([1.0, 4.0], 3)
...
<type 'exceptions.ValueError'>:
Arrays are not less-ordered
(mismatch 50.0%)
x: array([ 1., 4.])
y: array(3)
>>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
...
<type 'exceptions.ValueError'>:
Arrays are not less-ordered
(shapes (3,), (1,) mismatch)
x: array([ 1., 2., 3.])
y: array([4])
"""
__tracebackhide__ = True # Hide traceback for py.test
assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
verbose=verbose,
header='Arrays are not less-ordered')
def runstring(astr, dict):
exec(astr, dict)
def assert_string_equal(actual, desired):
"""
Test if two strings are equal.
If the given strings are equal, `assert_string_equal` does nothing.
If they are not equal, an AssertionError is raised, and the diff
between the strings is shown.
Parameters
----------
actual : str
The string to test for equality against the expected string.
desired : str
The expected string.
Examples
--------
>>> np.testing.assert_string_equal('abc', 'abc')
>>> np.testing.assert_string_equal('abc', 'abcd')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
...
AssertionError: Differences in strings:
- abc+ abcd? +
"""
# delay import of difflib to reduce startup time
__tracebackhide__ = True # Hide traceback for py.test
import difflib
if not isinstance(actual, str):
raise AssertionError(repr(type(actual)))
if not isinstance(desired, str):
raise AssertionError(repr(type(desired)))
if re.match(r'\A'+desired+r'\Z', actual, re.M):
return
diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
diff_list = []
while diff:
d1 = diff.pop(0)
if d1.startswith(' '):
continue
if d1.startswith('- '):
l = [d1]
d2 = diff.pop(0)
if d2.startswith('? '):
l.append(d2)
d2 = diff.pop(0)
if not d2.startswith('+ '):
raise AssertionError(repr(d2))
l.append(d2)
if diff:
d3 = diff.pop(0)
if d3.startswith('? '):
l.append(d3)
else:
diff.insert(0, d3)
if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
continue
diff_list.extend(l)
continue
raise AssertionError(repr(d1))
if not diff_list:
return
msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
if actual != desired:
raise AssertionError(msg)
def rundocs(filename=None, raise_on_error=True):
"""
Run doctests found in the given file.
By default `rundocs` raises an AssertionError on failure.
Parameters
----------
filename : str
The path to the file for which the doctests are run.
raise_on_error : bool
Whether to raise an AssertionError when a doctest fails. Default is
True.
Notes
-----
The doctests can be run by the user/developer by adding the ``doctests``
argument to the ``test()`` call. For example, to run all tests (including
doctests) for `numpy.lib`:
>>> np.lib.test(doctests=True) #doctest: +SKIP
"""
import doctest
import imp
if filename is None:
f = sys._getframe(1)
filename = f.f_globals['__file__']
name = os.path.splitext(os.path.basename(filename))[0]
path = [os.path.dirname(filename)]
file, pathname, description = imp.find_module(name, path)
try:
m = imp.load_module(name, file, pathname, description)
finally:
file.close()
tests = doctest.DocTestFinder().find(m)
runner = doctest.DocTestRunner(verbose=False)
msg = []
if raise_on_error:
out = lambda s: msg.append(s)
else:
out = None
for test in tests:
runner.run(test, out=out)
if runner.failures > 0 and raise_on_error:
raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
def raises(*args,**kwargs):
nose = import_nose()
return nose.tools.raises(*args,**kwargs)
def assert_raises(*args,**kwargs):
"""
assert_raises(exception_class, callable, *args, **kwargs)
Fail unless an exception of class exception_class is thrown
by callable when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
Alternatively, `assert_raises` can be used as a context manager:
>>> from numpy.testing import assert_raises
>>> with assert_raises(ZeroDivisionError):
... 1 / 0
is equivalent to
>>> def div(x, y):
... return x / y
>>> assert_raises(ZeroDivisionError, div, 1, 0)
"""
__tracebackhide__ = True # Hide traceback for py.test
nose = import_nose()
return nose.tools.assert_raises(*args,**kwargs)
assert_raises_regex_impl = None
def assert_raises_regex(exception_class, expected_regexp,
callable_obj=None, *args, **kwargs):
"""
Fail unless an exception of class exception_class and with message that
matches expected_regexp is thrown by callable when invoked with arguments
args and keyword arguments kwargs.
Name of this function adheres to Python 3.2+ reference, but should work in
all versions down to 2.6.
"""
__tracebackhide__ = True # Hide traceback for py.test
nose = import_nose()
global assert_raises_regex_impl
if assert_raises_regex_impl is None:
try:
# Python 3.2+
assert_raises_regex_impl = nose.tools.assert_raises_regex
except AttributeError:
try:
# 2.7+
assert_raises_regex_impl = nose.tools.assert_raises_regexp
except AttributeError:
# 2.6
# This class is copied from Python2.7 stdlib almost verbatim
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, expected_regexp=None):
self.expected = expected
self.expected_regexp = expected_regexp
def failureException(self, msg):
return AssertionError(msg)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
def impl(cls, regex, callable_obj, *a, **kw):
mgr = _AssertRaisesContext(cls, regex)
if callable_obj is None:
return mgr
with mgr:
callable_obj(*a, **kw)
assert_raises_regex_impl = impl
return assert_raises_regex_impl(exception_class, expected_regexp,
callable_obj, *args, **kwargs)
def decorate_methods(cls, decorator, testmatch=None):
"""
Apply a decorator to all methods in a class matching a regular expression.
The given decorator is applied to all public methods of `cls` that are
matched by the regular expression `testmatch`
(``testmatch.search(methodname)``). Methods that are private, i.e. start
with an underscore, are ignored.
Parameters
----------
cls : class
Class whose methods to decorate.
decorator : function
Decorator to apply to methods
testmatch : compiled regexp or str, optional
The regular expression. Default value is None, in which case the
nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
is used.
If `testmatch` is a string, it is compiled to a regular expression
first.
"""
if testmatch is None:
testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
else:
testmatch = re.compile(testmatch)
cls_attr = cls.__dict__
# delayed import to reduce startup time
from inspect import isfunction
methods = [_m for _m in cls_attr.values() if isfunction(_m)]
for function in methods:
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
continue
if testmatch.search(funcname) and not funcname.startswith('_'):
setattr(cls, funcname, decorator(function))
return
def measure(code_str,times=1,label=None):
"""
Return elapsed time for executing code in the namespace of the caller.
The supplied code string is compiled with the Python builtin ``compile``.
The precision of the timing is 10 milli-seconds. If the code will execute
fast on this timescale, it can be executed many times to get reasonable
timing accuracy.
Parameters
----------
code_str : str
The code to be timed.
times : int, optional
The number of times the code is executed. Default is 1. The code is
only compiled once.
label : str, optional
A label to identify `code_str` with. This is passed into ``compile``
as the second argument (for run-time error messages).
Returns
-------
elapsed : float
Total elapsed time in seconds for executing `code_str` `times` times.
Examples
--------
>>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
... times=times)
>>> print("Time for a single execution : ", etime / times, "s")
Time for a single execution : 0.005 s
"""
frame = sys._getframe(1)
locs, globs = frame.f_locals, frame.f_globals
code = compile(code_str,
'Test name: %s ' % label,
'exec')
i = 0
elapsed = jiffies()
while i < times:
i += 1
exec(code, globs, locs)
elapsed = jiffies() - elapsed
return 0.01*elapsed
def _assert_valid_refcount(op):
"""
Check that ufuncs don't mishandle refcount of object `1`.
Used in a few regression tests.
"""
import numpy as np
b = np.arange(100*100).reshape(100, 100)
c = b
i = 1
rc = sys.getrefcount(i)
for j in range(15):
d = op(b, c)
assert_(sys.getrefcount(i) >= rc)
del d # for pyflakes
def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=False,
err_msg='', verbose=True):
"""
Raises an AssertionError if two objects are not equal up to desired
tolerance.
The test is equivalent to ``allclose(actual, desired, rtol, atol)``.
It compares the difference between `actual` and `desired` to
``atol + rtol * abs(desired)``.
.. versionadded:: 1.5.0
Parameters
----------
actual : array_like
Array obtained.
desired : array_like
Array desired.
rtol : float, optional
Relative tolerance.
atol : float, optional
Absolute tolerance.
equal_nan : bool, optional.
If True, NaNs will compare equal.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_array_almost_equal_nulp, assert_array_max_ulp
Examples
--------
>>> x = [1e-5, 1e-3, 1e-1]
>>> y = np.arccos(np.cos(x))
>>> assert_allclose(x, y, rtol=1e-5, atol=0)
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
def compare(x, y):
return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan)
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
verbose=verbose, header=header)
def assert_array_almost_equal_nulp(x, y, nulp=1):
"""
Compare two arrays relatively to their spacing.
This is a relatively robust method to compare two arrays whose amplitude
is variable.
Parameters
----------
x, y : array_like
Input arrays.
nulp : int, optional
The maximum number of unit in the last place for tolerance (see Notes).
Default is 1.
Returns
-------
None
Raises
------
AssertionError
If the spacing between `x` and `y` for one or more elements is larger
than `nulp`.
See Also
--------
assert_array_max_ulp : Check that all items of arrays differ in at most
N Units in the Last Place.
spacing : Return the distance between x and the nearest adjacent number.
Notes
-----
An assertion is raised if the following condition is not met::
abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y)))
Examples
--------
>>> x = np.array([1., 1e-10, 1e-20])
>>> eps = np.finfo(x.dtype).eps
>>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
>>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
Traceback (most recent call last):
...
AssertionError: X and Y are not equal to 1 ULP (max is 2)
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
ax = np.abs(x)
ay = np.abs(y)
ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
if not np.all(np.abs(x-y) <= ref):
if np.iscomplexobj(x) or np.iscomplexobj(y):
msg = "X and Y are not equal to %d ULP" % nulp
else:
max_nulp = np.max(nulp_diff(x, y))
msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
raise AssertionError(msg)
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
"""
Check that all items of arrays differ in at most N Units in the Last Place.
Parameters
----------
a, b : array_like
Input arrays to be compared.
maxulp : int, optional
The maximum number of units in the last place that elements of `a` and
`b` can differ. Default is 1.
dtype : dtype, optional
Data-type to convert `a` and `b` to if given. Default is None.
Returns
-------
ret : ndarray
Array containing number of representable floating point numbers between
items in `a` and `b`.
Raises
------
AssertionError
If one or more elements differ by more than `maxulp`.
See Also
--------
assert_array_almost_equal_nulp : Compare two arrays relatively to their
spacing.
Examples
--------
>>> a = np.linspace(0., 1., 100)
>>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
ret = nulp_diff(a, b, dtype)
if not np.all(ret <= maxulp):
raise AssertionError("Arrays are not almost equal up to %g ULP" %
maxulp)
return ret
def nulp_diff(x, y, dtype=None):
"""For each item in x and y, return the number of representable floating
points between them.
Parameters
----------
x : array_like
first input array
y : array_like
second input array
dtype : dtype, optional
Data-type to convert `x` and `y` to if given. Default is None.
Returns
-------
nulp : array_like
number of representable floating point numbers between each item in x
and y.
Examples
--------
    # By definition, epsilon is the smallest number such that 1 + eps != 1, so
# there should be exactly one ULP between 1 and 1 + eps
>>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)
1.0
"""
import numpy as np
if dtype:
x = np.array(x, dtype=dtype)
y = np.array(y, dtype=dtype)
else:
x = np.array(x)
y = np.array(y)
t = np.common_type(x, y)
if np.iscomplexobj(x) or np.iscomplexobj(y):
raise NotImplementedError("_nulp not implemented for complex array")
x = np.array(x, dtype=t)
y = np.array(y, dtype=t)
if not x.shape == y.shape:
raise ValueError("x and y do not have the same shape: %s - %s" %
(x.shape, y.shape))
def _diff(rx, ry, vdt):
diff = np.array(rx-ry, dtype=vdt)
return np.abs(diff)
rx = integer_repr(x)
ry = integer_repr(y)
return _diff(rx, ry, t)
def _integer_repr(x, vdt, comp):
# Reinterpret binary representation of the float as sign-magnitude:
# take into account two-complement representation
# See also
# http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
rx = x.view(vdt)
if not (rx.size == 1):
rx[rx < 0] = comp - rx[rx < 0]
else:
if rx < 0:
rx = comp - rx
return rx
def integer_repr(x):
"""Return the signed-magnitude interpretation of the binary representation of
x."""
import numpy as np
if x.dtype == np.float32:
return _integer_repr(x, np.int32, np.int32(-2**31))
elif x.dtype == np.float64:
return _integer_repr(x, np.int64, np.int64(-2**63))
else:
raise ValueError("Unsupported dtype %s" % x.dtype)
# The following two classes are copied from python 2.6 warnings module (context
# manager)
class WarningMessage(object):
"""
Holds the result of a single showwarning() call.
Deprecated in 1.8.0
Notes
-----
`WarningMessage` is copied from the Python 2.6 warnings module,
so it can be used in NumPy with older Python versions.
"""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
if category:
self._category_name = category.__name__
else:
self._category_name = None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class WarningManager(object):
"""
A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of ``warnings.showwarning()`` and be appended to a
list returned by the context manager. Otherwise None is returned by the
context manager. The objects appended to the list are arguments whose
attributes mirror the arguments to ``showwarning()``.
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
Deprecated in 1.8.0
Notes
-----
`WarningManager` is a copy of the ``catch_warnings`` context manager
from the Python 2.6 warnings module, with slight modifications.
It is copied so it can be used in NumPy with older Python versions.
"""
def __init__(self, record=False, module=None):
self._record = record
if module is None:
self._module = sys.modules['warnings']
else:
self._module = module
self._entered = False
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
def __exit__(self):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
@contextlib.contextmanager
def _assert_warns_context(warning_class, name=None):
__tracebackhide__ = True # Hide traceback for py.test
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
yield
if not len(l) > 0:
name_str = " when calling %s" % name if name is not None else ""
raise AssertionError("No warning raised" + name_str)
if not l[0].category is warning_class:
name_str = "%s " % name if name is not None else ""
raise AssertionError("First warning %sis not a %s (is %s)"
% (name_str, warning_class, l[0]))
def assert_warns(warning_class, *args, **kwargs):
"""
Fail unless the given callable throws the specified warning.
A warning of class warning_class should be thrown by the callable when
invoked with arguments args and keyword arguments kwargs.
If a different type of warning is thrown, it will not be caught, and the
test case will be deemed to have suffered an error.
If called with all arguments other than the warning class omitted, may be
used as a context manager:
with assert_warns(SomeWarning):
do_something()
The ability to be used as a context manager is new in NumPy v1.11.0.
.. versionadded:: 1.4.0
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable
The callable to test.
\\*args : Arguments
Arguments passed to `func`.
\\*\\*kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
if not args:
return _assert_warns_context(warning_class)
func = args[0]
args = args[1:]
with _assert_warns_context(warning_class, name=func.__name__):
return func(*args, **kwargs)
@contextlib.contextmanager
def _assert_no_warnings_context(name=None):
__tracebackhide__ = True # Hide traceback for py.test
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
yield
if len(l) > 0:
name_str = " when calling %s" % name if name is not None else ""
raise AssertionError("Got warnings%s: %s" % (name_str, l))
def assert_no_warnings(*args, **kwargs):
"""
Fail if the given callable produces any warnings.
If called with all arguments omitted, may be used as a context manager:
with assert_no_warnings():
do_something()
The ability to be used as a context manager is new in NumPy v1.11.0.
.. versionadded:: 1.7.0
Parameters
----------
func : callable
The callable to test.
\\*args : Arguments
Arguments passed to `func`.
\\*\\*kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
if not args:
return _assert_no_warnings_context()
func = args[0]
args = args[1:]
with _assert_no_warnings_context(name=func.__name__):
return func(*args, **kwargs)
def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
"""
generator producing data with different alignment and offsets
to test simd vectorization
Parameters
----------
dtype : dtype
data type to produce
type : string
'unary': create data for unary operations, creates one input
and output array
        'binary': create data for binary operations, creates two input
and output array
max_size : integer
maximum size of data to produce
Returns
-------
if type is 'unary' yields one output, one input array and a message
containing information on the data
if type is 'binary' yields one output array, two input array and a message
containing information on the data
"""
ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
for o in range(3):
for s in range(o + 2, max(o + 3, max_size)):
if type == 'unary':
inp = lambda: arange(s, dtype=dtype)[o:]
out = empty((s,), dtype=dtype)[o:]
yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
yield inp(), inp(), ufmt % (o, o, s, dtype, 'in place')
yield out[1:], inp()[:-1], ufmt % \
(o + 1, o, s - 1, dtype, 'out of place')
yield out[:-1], inp()[1:], ufmt % \
(o, o + 1, s - 1, dtype, 'out of place')
yield inp()[:-1], inp()[1:], ufmt % \
(o, o + 1, s - 1, dtype, 'aliased')
yield inp()[1:], inp()[:-1], ufmt % \
(o + 1, o, s - 1, dtype, 'aliased')
if type == 'binary':
inp1 = lambda: arange(s, dtype=dtype)[o:]
inp2 = lambda: arange(s, dtype=dtype)[o:]
out = empty((s,), dtype=dtype)[o:]
yield out, inp1(), inp2(), bfmt % \
(o, o, o, s, dtype, 'out of place')
yield inp1(), inp1(), inp2(), bfmt % \
(o, o, o, s, dtype, 'in place1')
yield inp2(), inp1(), inp2(), bfmt % \
(o, o, o, s, dtype, 'in place2')
yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
(o + 1, o, o, s - 1, dtype, 'out of place')
yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
(o, o + 1, o, s - 1, dtype, 'out of place')
yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
(o, o, o + 1, s - 1, dtype, 'out of place')
yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
(o + 1, o, o, s - 1, dtype, 'aliased')
yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
(o, o + 1, o, s - 1, dtype, 'aliased')
yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
(o, o, o + 1, s - 1, dtype, 'aliased')
class IgnoreException(Exception):
"Ignoring this exception due to disabled feature"
@contextlib.contextmanager
def tempdir(*args, **kwargs):
"""Context manager to provide a temporary test folder.
    All arguments are passed on to the underlying tempfile.mkdtemp
    function.
"""
tmpdir = mkdtemp(*args, **kwargs)
try:
yield tmpdir
finally:
shutil.rmtree(tmpdir)
@contextlib.contextmanager
def temppath(*args, **kwargs):
"""Context manager for temporary files.
Context manager that returns the path to a closed temporary file. Its
parameters are the same as for tempfile.mkstemp and are passed directly
to that function. The underlying file is removed when the context is
exited, so it should be closed at that time.
Windows does not allow a temporary file to be opened if it is already
open, so the underlying file must be closed after opening before it
can be opened again.
"""
fd, path = mkstemp(*args, **kwargs)
os.close(fd)
try:
yield path
finally:
os.remove(path)
class clear_and_catch_warnings(warnings.catch_warnings):
""" Context manager that resets warning registry for catching warnings
Warnings can be slippery, because, whenever a warning is triggered, Python
adds a ``__warningregistry__`` member to the *calling* module. This makes
it impossible to retrigger the warning in this module, whatever you put in
the warnings filters. This context manager accepts a sequence of `modules`
as a keyword argument to its constructor and:
* stores and removes any ``__warningregistry__`` entries in given `modules`
on entry;
* resets ``__warningregistry__`` to its previous state on exit.
This makes it possible to trigger any warning afresh inside the context
manager without disturbing the state of warnings outside.
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
Parameters
----------
record : bool, optional
Specifies whether warnings should be captured by a custom
implementation of ``warnings.showwarning()`` and be appended to a list
returned by the context manager. Otherwise None is returned by the
context manager. The objects appended to the list are arguments whose
attributes mirror the arguments to ``showwarning()``.
modules : sequence, optional
Sequence of modules for which to reset warnings registry on entry and
restore on exit
Examples
--------
>>> import warnings
>>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
... warnings.simplefilter('always')
... # do something that raises a warning in np.core.fromnumeric
"""
class_modules = ()
def __init__(self, record=False, modules=()):
self.modules = set(modules).union(self.class_modules)
self._warnreg_copies = {}
super(clear_and_catch_warnings, self).__init__(record=record)
def __enter__(self):
for mod in self.modules:
if hasattr(mod, '__warningregistry__'):
mod_reg = mod.__warningregistry__
self._warnreg_copies[mod] = mod_reg.copy()
mod_reg.clear()
return super(clear_and_catch_warnings, self).__enter__()
def __exit__(self, *exc_info):
super(clear_and_catch_warnings, self).__exit__(*exc_info)
for mod in self.modules:
if hasattr(mod, '__warningregistry__'):
mod.__warningregistry__.clear()
if mod in self._warnreg_copies:
mod.__warningregistry__.update(self._warnreg_copies[mod])
| MyRookie/SentimentAnalyse | venv/lib/python2.7/site-packages/numpy/testing/utils.py | Python | mit | 66,431 | 0.001114 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
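# Schema migration: adds the Comment model, linked back to blogg.Post through
# the ``comments`` related name.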
class Migration(migrations.Migration):
dependencies = [
('blogg', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', models.TextField(max_length=1000L)),
('author', models.CharField(default=b'Anonymous', max_length=100, blank=True)),
('ip_address', models.GenericIPAddressField(null=True, blank=True)),
('user_agent', models.CharField(max_length=500L, blank=True)),
('published', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True, auto_now_add=True)),
('post', models.ForeignKey(related_name='comments', to='blogg.Post')),
],
options={
'ordering': ['-created'],
},
bases=(models.Model,),
),
]
| ishahid/django-blogg | source/blogg/migrations/0002_comment.py | Python | mit | 1,206 | 0.004146 |
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2016 Cara Vinson <ceridwenv@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""this module contains a set of functions to create astroid trees from scratch
(build_* functions) or from living object (object_build_* functions)
"""
import inspect
import logging
import os
import sys
import types
import six
from astroid import bases
from astroid import manager
from astroid import node_classes
from astroid import nodes
MANAGER = manager.AstroidManager()
# the keys of CONST_CLS, e.g. python builtin types
_CONSTANTS = tuple(node_classes.CONST_CLS)
_JYTHON = os.name == 'java'
_BUILTINS = vars(six.moves.builtins)
_LOG = logging.getLogger(__name__)
def _io_discrepancy(member):
# _io module names itself `io`: http://bugs.python.org/issue18602
member_self = getattr(member, '__self__', None)
return (member_self and
inspect.ismodule(member_self) and
member_self.__name__ == '_io' and
member.__module__ == 'io')
def _attach_local_node(parent, node, name):
node.name = name # needed by add_local_node
parent.add_local_node(node)
def _add_dunder_class(func, member):
"""Add a __class__ member to the given func node, if we can determine it."""
python_cls = member.__class__
cls_name = getattr(python_cls, '__name__', None)
if not cls_name:
return
cls_bases = [ancestor.__name__ for ancestor in python_cls.__bases__]
ast_klass = build_class(cls_name, cls_bases, python_cls.__doc__)
func.instance_attrs['__class__'] = [ast_klass]
_marker = object()
def attach_dummy_node(node, name, runtime_object=_marker):
"""create a dummy node and register it in the locals of the given
node with the specified name
"""
enode = nodes.EmptyNode()
enode.object = runtime_object
_attach_local_node(node, enode, name)
def _has_underlying_object(self):
return self.object is not None and self.object is not _marker
nodes.EmptyNode.has_underlying_object = _has_underlying_object
def attach_const_node(node, name, value):
"""create a Const node and register it in the locals of the given
node with the specified name
"""
if name not in node.special_attributes:
_attach_local_node(node, nodes.const_factory(value), name)
def attach_import_node(node, modname, membername):
"""create a ImportFrom node and register it in the locals of the given
node with the specified name
"""
from_node = nodes.ImportFrom(modname, [(membername, None)])
_attach_local_node(node, from_node, membername)
def build_module(name, doc=None):
"""create and initialize a astroid Module node"""
node = nodes.Module(name, doc, pure_python=False)
node.package = False
node.parent = None
return node
def build_class(name, basenames=(), doc=None):
"""create and initialize a astroid ClassDef node"""
node = nodes.ClassDef(name, doc)
for base in basenames:
basenode = nodes.Name()
basenode.name = base
node.bases.append(basenode)
basenode.parent = node
return node
def build_function(name, args=None, defaults=None, doc=None):
"""create and initialize a astroid FunctionDef node"""
args, defaults = args or [], defaults or []
# first argument is now a list of decorators
func = nodes.FunctionDef(name, doc)
func.args = argsnode = nodes.Arguments()
argsnode.args = []
for arg in args:
argsnode.args.append(nodes.Name())
argsnode.args[-1].name = arg
argsnode.args[-1].parent = argsnode
argsnode.defaults = []
for default in defaults:
argsnode.defaults.append(nodes.const_factory(default))
argsnode.defaults[-1].parent = argsnode
argsnode.kwarg = None
argsnode.vararg = None
argsnode.parent = func
if args:
register_arguments(func)
return func
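# Illustrative sketch (not part of the original module): with the helpers above,
# building a node equivalent to ``def f(a, b=1): pass`` would look roughly like
#
#     func = build_function('f', args=['a', 'b'], defaults=[1])
#
# after which func.args.args holds two nodes.Name instances ('a' and 'b'),
# func.args.defaults holds [nodes.const_factory(1)], and both argument names
# are registered as locals of ``func`` via register_arguments().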
def build_from_import(fromname, names):
"""create and initialize an astroid ImportFrom import statement"""
return nodes.ImportFrom(fromname, [(name, None) for name in names])
def register_arguments(func, args=None):
"""add given arguments to local
    args is a list that may contain nested lists
(i.e. def func(a, (b, c, d)): ...)
"""
if args is None:
args = func.args.args
if func.args.vararg:
func.set_local(func.args.vararg, func.args)
if func.args.kwarg:
func.set_local(func.args.kwarg, func.args)
for arg in args:
if isinstance(arg, nodes.Name):
func.set_local(arg.name, arg)
else:
register_arguments(func, arg.elts)
def object_build_class(node, member, localname):
"""create astroid for a living class object"""
basenames = [base.__name__ for base in member.__bases__]
return _base_class_object_build(node, member, basenames,
localname=localname)
def object_build_function(node, member, localname):
"""create astroid for a living function object"""
# pylint: disable=deprecated-method; completely removed in 2.0
args, varargs, varkw, defaults = inspect.getargspec(member)
if varargs is not None:
args.append(varargs)
if varkw is not None:
args.append(varkw)
func = build_function(getattr(member, '__name__', None) or localname, args,
defaults, member.__doc__)
node.add_local_node(func, localname)
def object_build_datadescriptor(node, member, name):
"""create astroid for a living data descriptor object"""
return _base_class_object_build(node, member, [], name)
def object_build_methoddescriptor(node, member, localname):
"""create astroid for a living method descriptor object"""
# FIXME get arguments ?
func = build_function(getattr(member, '__name__', None) or localname,
doc=member.__doc__)
# set node's arguments to None to notice that we have no information, not
    # an empty argument list
func.args.args = None
node.add_local_node(func, localname)
_add_dunder_class(func, member)
def _base_class_object_build(node, member, basenames, name=None, localname=None):
"""create astroid for a living class object, with a given set of base names
(e.g. ancestors)
"""
klass = build_class(name or getattr(member, '__name__', None) or localname,
basenames, member.__doc__)
klass._newstyle = isinstance(member, type)
node.add_local_node(klass, localname)
try:
# limit the instantiation trick since it's too dangerous
# (such as infinite test execution...)
# this at least resolves common case such as Exception.args,
# OSError.errno
if issubclass(member, Exception):
instdict = member().__dict__
else:
raise TypeError
except: # pylint: disable=bare-except
pass
else:
for item_name, obj in instdict.items():
valnode = nodes.EmptyNode()
valnode.object = obj
valnode.parent = klass
valnode.lineno = 1
klass.instance_attrs[item_name] = [valnode]
return klass
def _build_from_function(node, name, member, module):
# verify this is not an imported function
try:
code = six.get_function_code(member)
except AttributeError:
# Some implementations don't provide the code object,
# such as Jython.
code = None
filename = getattr(code, 'co_filename', None)
if filename is None:
assert isinstance(member, object)
object_build_methoddescriptor(node, member, name)
elif filename != getattr(module, '__file__', None):
attach_dummy_node(node, name, member)
else:
object_build_function(node, member, name)
class InspectBuilder(object):
"""class for building nodes from living object
this is actually a really minimal representation, including only Module,
FunctionDef and ClassDef nodes and some others as guessed.
"""
# astroid from living objects ###############################################
def __init__(self):
self._done = {}
self._module = None
def inspect_build(self, module, modname=None, path=None):
"""build astroid from a living module (i.e. using inspect)
this is used when there is no python source code available (either
because it's a built-in module or because the .py is not available)
"""
self._module = module
if modname is None:
modname = module.__name__
try:
node = build_module(modname, module.__doc__)
except AttributeError:
# in jython, java modules have no __doc__ (see #109562)
node = build_module(modname)
node.file = node.path = os.path.abspath(path) if path else path
node.name = modname
MANAGER.cache_module(node)
node.package = hasattr(module, '__path__')
self._done = {}
self.object_build(node, module)
return node
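    # Illustrative use (not part of the original class): for a module with no
    # Python source, such as the built-in ``sys`` module, one could write
    #
    #     module_node = InspectBuilder().inspect_build(sys)
    #
    # which returns (and caches in MANAGER) a nodes.Module whose locals map the
    # names found by dir(sys) to astroid nodes.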
def object_build(self, node, obj):
"""recursive method which create a partial ast from real objects
(only function, class, and method are handled)
"""
if obj in self._done:
return self._done[obj]
self._done[obj] = node
for name in dir(obj):
try:
member = getattr(obj, name)
except AttributeError:
# damned ExtensionClass.Base, I know you're there !
attach_dummy_node(node, name)
continue
if inspect.ismethod(member):
member = six.get_method_function(member)
if inspect.isfunction(member):
_build_from_function(node, name, member, self._module)
elif inspect.isbuiltin(member):
if (not _io_discrepancy(member) and
self.imported_member(node, member, name)):
continue
object_build_methoddescriptor(node, member, name)
elif inspect.isclass(member):
if self.imported_member(node, member, name):
continue
if member in self._done:
class_node = self._done[member]
if class_node not in node.locals.get(name, ()):
node.add_local_node(class_node, name)
else:
class_node = object_build_class(node, member, name)
# recursion
self.object_build(class_node, member)
if name == '__class__' and class_node.parent is None:
class_node.parent = self._done[self._module]
elif inspect.ismethoddescriptor(member):
assert isinstance(member, object)
object_build_methoddescriptor(node, member, name)
elif inspect.isdatadescriptor(member):
assert isinstance(member, object)
object_build_datadescriptor(node, member, name)
elif isinstance(member, _CONSTANTS):
attach_const_node(node, name, member)
elif inspect.isroutine(member):
# This should be called for Jython, where some builtin
# methods aren't caught by isbuiltin branch.
_build_from_function(node, name, member, self._module)
else:
# create an empty node so that the name is actually defined
attach_dummy_node(node, name, member)
def imported_member(self, node, member, name):
"""verify this is not an imported class or handle it"""
# /!\ some classes like ExtensionClass doesn't have a __module__
# attribute ! Also, this may trigger an exception on badly built module
# (see http://www.logilab.org/ticket/57299 for instance)
try:
modname = getattr(member, '__module__', None)
except: # pylint: disable=bare-except
_LOG.exception('unexpected error while building '
'astroid from living object')
modname = None
if modname is None:
if (name in ('__new__', '__subclasshook__')
or (name in _BUILTINS and _JYTHON)):
# Python 2.5.1 (r251:54863, Sep 1 2010, 22:03:14)
# >>> print object.__new__.__module__
# None
modname = six.moves.builtins.__name__
else:
attach_dummy_node(node, name, member)
return True
real_name = {
'gtk': 'gtk_gtk',
'_io': 'io',
}.get(modname, modname)
if real_name != self._module.__name__:
# check if it sounds valid and then add an import node, else use a
# dummy node
try:
getattr(sys.modules[modname], name)
except (KeyError, AttributeError):
attach_dummy_node(node, name, member)
else:
attach_import_node(node, modname, name)
return True
return False
### astroid bootstrapping ######################################################
Astroid_BUILDER = InspectBuilder()
_CONST_PROXY = {}
def _astroid_bootstrapping(astroid_builtin=None):
"""astroid boot strapping the builtins module"""
# this boot strapping is necessary since we need the Const nodes to
# inspect_build builtins, and then we can proxy Const
if astroid_builtin is None:
from six.moves import builtins
astroid_builtin = Astroid_BUILDER.inspect_build(builtins)
# pylint: disable=redefined-outer-name
for cls, node_cls in node_classes.CONST_CLS.items():
if cls is type(None):
proxy = build_class('NoneType')
proxy.parent = astroid_builtin
elif cls is type(NotImplemented):
proxy = build_class('NotImplementedType')
proxy.parent = astroid_builtin
else:
proxy = astroid_builtin.getattr(cls.__name__)[0]
if cls in (dict, list, set, tuple):
node_cls._proxied = proxy
else:
_CONST_PROXY[cls] = proxy
_astroid_bootstrapping()
# TODO : find a nicer way to handle this situation;
# However __proxied introduced an
# infinite recursion (see https://bugs.launchpad.net/pylint/+bug/456870)
def _set_proxied(const):
return _CONST_PROXY[const.value.__class__]
nodes.Const._proxied = property(_set_proxied)
_GeneratorType = nodes.ClassDef(types.GeneratorType.__name__, types.GeneratorType.__doc__)
_GeneratorType.parent = MANAGER.astroid_cache[six.moves.builtins.__name__]
bases.Generator._proxied = _GeneratorType
Astroid_BUILDER.object_build(bases.Generator._proxied, types.GeneratorType)
_builtins = MANAGER.astroid_cache[six.moves.builtins.__name__]
BUILTIN_TYPES = (types.GetSetDescriptorType, types.GeneratorType,
types.MemberDescriptorType, type(None), type(NotImplemented),
types.FunctionType, types.MethodType,
types.BuiltinFunctionType, types.ModuleType, types.TracebackType)
for _type in BUILTIN_TYPES:
if _type.__name__ not in _builtins:
cls = nodes.ClassDef(_type.__name__, _type.__doc__)
cls.parent = MANAGER.astroid_cache[six.moves.builtins.__name__]
Astroid_BUILDER.object_build(cls, _type)
_builtins[_type.__name__] = cls
| arju88nair/projectCulminate | venv/lib/python3.5/site-packages/astroid/raw_building.py | Python | apache-2.0 | 15,733 | 0.001525 |
# coding: utf-8
"""
HDL Testing Platform
REST API for HDL TP # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.task import Task # noqa: E501
from swagger_client.rest import ApiException
class TestTask(unittest.TestCase):
"""Task unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTask(self):
"""Test Task"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.task.Task() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| autosub-team/autosub | src/plugins/vels_ob/test/test_task.py | Python | gpl-2.0 | 776 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('discussions', '0004_auto_20150430_1641'),
]
operations = [
migrations.AlterField(
model_name='discussion',
name='original_post',
field=models.OneToOneField(null=True, to='discussions.Post', related_name='OP'),
),
]
| ZackYovel/studybuddy | server/studybuddy/discussions/migrations/0005_auto_20150430_1645.py | Python | mit | 459 | 0.002179 |
'''
File: input.py
Author: Tristan van Vaalen
Handles user input
'''
import signal
import sys
import verbose
v = verbose.Verbose()
class InputHandler():
def __init__(self):
v.debug('Initializing input handler').indent()
self.running = True
self.signal_level = 0
v.debug('Registering signal handler').unindent()
signal.signal(signal.SIGINT, self.signal_handler)
def test(self):
pass
def signal_handler(self, signal, frame):
self.signal_level += 1
if self.signal_level == 1:
self.running = False
else:
sys.exit(0)
def output_options(self):
v.write(
'Available options:\n' +
' - help: prints this message\n' +
            ' - exit: exit program\n' +
            ' - test: magic'
)
def get(self):
v.debug('Entering input loop')
v.write('AUDIOLYZE v0.01\nPress ctrl+D to exit')
while self.running:
try:
self._parse_input(raw_input('>>> '))
except EOFError:
v.write('EOF received')
self.running = False
v.write('Goodbye')
def _parse_input(self, raw):
raw = raw.strip()
if raw in ['help', 'h', '?']:
self.output_options()
elif raw in ['quit', 'exit', 'stop', 'abort']:
self.running = False
elif raw in ['test']:
self.test()
else:
v.write(
'Invalid command \'{}\'. Try \'help\' for a list of commands'
.format(raw)
)
| Scoudem/audiolyze | inputhandler.py | Python | mit | 1,620 | 0 |
import networkx as nx
import itertools
import matplotlib.pyplot as plt
fig = plt.figure()
fig.subplots_adjust(left=0.2, wspace=0.6)
G = nx.Graph()
G.add_edges_from([(1,2,{'w': 6}),
(2,3,{'w': 3}),
(3,1,{'w': 4}),
(3,4,{'w': 12}),
(4,5,{'w': 13}),
(5,3,{'w': 11}),
])
import pprint
# detect triangles: a trio of nodes forms a triangle when all three node pairs
# are connected by an edge
triangles = []
for trio in itertools.combinations(G.nodes(), 3):
    edges = []
    for pair in itertools.combinations(trio, 2):
        edge_data = G.get_edge_data(*pair)
        if edge_data:
            edges.append(pair)
    if len(edges) == 3:
        triangles.append(edges)
pos = nx.spring_layout(G)
graph1 = fig.add_subplot(121)
# graph1.plot(nx.draw_networkx_nodes(G, pos=pos, node_size=[G.degree(n) for n in G.nodes()], label=True, alpha=0.75),
# nx.draw_networkx_edges(G, pos=pos, width=[G.get_edge_data(*e)['w'] for e in G.edges()], alpha=0.75))
graph1.plot(nx.draw(G,
pos=pos,
node_size = [G.degree(n) for n in G.nodes()],
width = [G.get_edge_data(*e)['w'] for e in G.edges()],
edge_color = [G.get_edge_data(*e)['w'] for e in G.edges()] ))
#plt.show()
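# Illustrative trace of the pruning loop below, derived from the edge weights
# defined at the top of this script: triangle (1, 2, 3) has weights {6, 3, 4}
# and loses its lightest edge (2, 3); triangle (3, 4, 5) has weights
# {12, 13, 11} and loses edge (5, 3). In short, each detected triangle is
# pruned by removing one minimum-weight edge, unless all three edges share the
# same weight.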
for t in triangles:
weights = {}
for v in t:
k = (G.get_edge_data(*v)['w'])
weights[k]=v
l = weights.keys()
if len(l) != 1:
l.sort()
l.reverse()
pprint.pprint(l)
quitar = l.pop()
G.remove_edge(*weights[quitar])
graph2 = fig.add_subplot(122)
graph2.plot(nx.draw(G,
pos=pos,
node_size = [G.degree(n) for n in G.nodes()],
width = [G.get_edge_data(*e)['w'] for e in G.edges()],
edge_color = [G.get_edge_data(*e)['w'] for e in G.edges()] ))
plt.show()
| CSB-IG/natk | ninnx/pruning/mi_triangles.py | Python | gpl-3.0 | 1,793 | 0.026771 |
#!/usr/bin/env python
import glob, os, sys
import sipconfig
from PyQt4 import pyqtconfig
def get_diana_version():
depends = filter(lambda line: line.startswith("Depends:"),
open("debian/control").readlines())
for line in depends:
pieces = line.split()
for piece in pieces:
name_pieces = piece.strip(",").split("-")
if len(name_pieces) == 2 and name_pieces[0] == "diana":
return name_pieces[1]
return None
def get_python_diana_version():
line = open("debian/changelog").readline()
pieces = line.split()
return pieces[1][1:-1]
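# Example of the assumed input formats (version numbers are hypothetical):
# a debian/control line such as
#     Depends: python (>= 2.5), diana-3.39, metlibs
# makes get_diana_version() return "3.39", and a debian/changelog first line
#     python-diana (3.39-1) unstable; urgency=low
# makes get_python_diana_version() return "3.39-1".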
if __name__ == "__main__":
if len(sys.argv) not in (1, 3, 5):
sys.stderr.write("Usage: %s [<directory containing diana headers> <directory containing libdiana>] "
"[<directory containing metlibs headers> <directory containing metlibs libraries>]\n" % sys.argv[0])
sys.exit(1)
if len(sys.argv) == 5:
metlibs_inc_dir = sys.argv[3]
metlibs_lib_dir = sys.argv[4]
else:
metlibs_inc_dir = "/usr/include/metlibs"
metlibs_lib_dir = "/usr/lib"
if len(sys.argv) >= 3:
diana_inc_dir = sys.argv[1]
diana_lib_dir = sys.argv[2]
else:
diana_inc_dir = "/usr/include/diana"
diana_lib_dir = "/usr/lib"
qt_pkg_dir = os.getenv("qt_pkg_dir")
python_diana_pkg_dir = os.getenv("python_diana_pkg_dir")
dest_pkg_dir = os.path.join(python_diana_pkg_dir, "metno")
config = pyqtconfig.Configuration()
# The name of the SIP build file generated by SIP and used by the build
# system.
sip_files_dir = "sip"
modules = ["std", "metlibs", "diana"]
if not os.path.exists("modules"):
os.mkdir("modules")
# Run SIP to generate the code.
output_dirs = []
for module in modules:
output_dir = os.path.join("modules", module)
build_file = module + ".sbf"
build_path = os.path.join(output_dir, build_file)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
sip_file = os.path.join("sip", module, module+".sip")
command = " ".join([config.sip_bin,
"-c", output_dir,
"-b", build_path,
"-I"+config.sip_inc_dir,
"-I"+config.pyqt_sip_dir,
"-I"+diana_inc_dir,
"-I/usr/include",
"-I"+metlibs_inc_dir,
"-I"+qt_pkg_dir+"/include",
"-I"+qt_pkg_dir+"/share/sip/PyQt4",
"-Isip",
config.pyqt_sip_flags,
"-w",
"-o", # generate docstrings for signatures
sip_file])
sys.stdout.write(command+"\n")
sys.stdout.flush()
if os.system(command) != 0:
sys.exit(1)
# Create the Makefile (within the diana directory).
makefile = pyqtconfig.QtGuiModuleMakefile(
config, build_file, dir=output_dir,
install_dir=dest_pkg_dir,
qt=["QtCore", "QtGui", "QtNetwork", "QtXml", "QtXmlPatterns"]
)
if module == "diana":
makefile.extra_include_dirs += [
diana_inc_dir,
os.path.join(diana_inc_dir, "PaintGL"),
metlibs_inc_dir,
qt_pkg_dir+"/include"
]
makefile.extra_lib_dirs += [diana_lib_dir, qt_pkg_dir+"/lib"]
makefile.extra_lflags += ["-Wl,-rpath="+diana_lib_dir, "-Wl,-fPIC"]
makefile.extra_libs += ["diana"]
if module == "metlibs":
makefile.extra_include_dirs.append(diana_inc_dir)
makefile.extra_include_dirs.append("/usr/include/metlibs")
makefile.extra_lib_dirs += [diana_lib_dir, "/usr/lib", metlibs_lib_dir, qt_pkg_dir+"/lib"]
makefile.extra_lflags += ["-Wl,-rpath="+diana_lib_dir, "-Wl,-fPIC"]
makefile.extra_libs += ["miLogger", "coserver", "diana"]
makefile.generate()
output_dirs.append(output_dir)
# Update the metno package version.
diana_version = get_diana_version()
python_diana_version = get_python_diana_version()
if not diana_version or not python_diana_version:
sys.stderr.write("Failed to find version information for Diana (%s) "
"or python-diana (%s)\n" % (repr(diana_version),
repr(python_diana_version)))
sys.exit(1)
f = open("python/metno/versions.py", "w")
f.write('\ndiana_version = "%s"\npython_diana_version = "%s"\n' % (
diana_version, python_diana_version))
# Generate the top-level Makefile.
python_files = glob.glob(os.path.join("python", "metno", "*.py"))
sipconfig.ParentMakefile(
configuration = config,
subdirs = output_dirs,
installs = [(python_files, dest_pkg_dir)]
).generate()
sys.exit()
| dboddie/python-diana | configure.py | Python | gpl-2.0 | 5,321 | 0.008081 |
import unittest
from flip_bit_to_win import flip_bit
class TestFlipBit(unittest.TestCase):
def test_flip_bit(self):
self.assertEquals(flip_bit(0b1011100101), 4)
self.assertEquals(flip_bit(1775), 8)
if __name__ == '__main__':
unittest.main()
| heitorschueroff/ctci | ch5/5.03_Flip_Bit_To_Win/test_flip_bit_to_win.py | Python | mit | 269 | 0 |
#
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import json
from math import radians, sin, cos, asin, sqrt
import math
import os
import socket
import sys
from threading import Thread
import time
import urllib
import serial.tools.list_ports
from constants import SAMPLE_RATE, TIMESTAMP_FILE
class RemoteControl(object):
def __init__(self):
self.connected = False
self.socket = None
def __connect(self):
if not self.connected:
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(1)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.connect(('localhost', 3382))
self.connected = True
except socket.error:
self.connected = False
def __thread(self, command):
self.__connect()
if self.connected:
try:
self.socket.send(json.dumps(command))
self.socket.send('\r\n')
except socket.error:
self.socket.close()
self.connected = False
def __send(self, command):
thread = Thread(target=self.__thread, args=(command,))
thread.daemon = True
thread.start()
def tune(self, frequency):
command = {'Command': 'Set',
'Method': 'Frequency',
'Value': frequency}
self.__send(command)
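# Illustrative use (not part of the original file): one RemoteControl instance
# keeps a single TCP connection to the scanner's remote-control server on
# localhost:3382 and sends tune requests from a background thread; the
# frequency is forwarded unchanged, so pass whatever unit the server expects.
#
#     remote = RemoteControl()
#     remote.tune(100.3e6)  # silently does nothing if no server is listening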
def get_script_dir():
if not hasattr(sys, 'frozen'):
scriptDir = os.path.dirname(os.path.realpath(sys.argv[0]))
else:
scriptDir = sys._MEIPASS
return scriptDir
def get_resdir():
scriptDir = get_script_dir()
if os.path.isdir(os.path.join(scriptDir, 'res')):
resDir = os.path.join(scriptDir, 'res')
else:
resDir = os.path.join(scriptDir, '..', 'res')
return resDir
def get_resource_path(resource):
return os.path.join(get_resdir(), resource)
def limit(value, minimum, maximum):
return max(min(maximum, value), minimum)
def level_to_db(level):
return 10 * math.log10(level)
def db_to_level(dB):
return math.pow(10, dB / 10.0)
def next_2_to_pow(val):
val -= 1
val |= val >> 1
val |= val >> 2
val |= val >> 4
val |= val >> 8
val |= val >> 16
return val + 1
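# e.g. next_2_to_pow(1000) == 1024 and next_2_to_pow(1024) == 1024: values are
# rounded up to the nearest power of two, and exact powers map to themselves.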
def calc_samples(dwell):
samples = dwell * SAMPLE_RATE
samples = next_2_to_pow(int(samples))
return samples
def calc_real_dwell(dwell):
samples = calc_samples(dwell)
dwellReal = samples / SAMPLE_RATE
return (int)(dwellReal * 1000.0) / 1000.0
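# Worked example (illustrative; the actual SAMPLE_RATE is defined in
# constants.py): assuming a 2 MHz sample rate, a requested dwell of 0.131 s
# gives 262000 raw samples, calc_samples() rounds this up to 262144, and
# calc_real_dwell() reports the achievable dwell 262144 / 2e6 = 0.131072 s,
# truncated to 0.131 s.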
def nearest(value, values):
offset = [abs(value - v) for v in values]
return values[offset.index(min(offset))]
def haversine(lat1, lat2, lon1, lon2):
lat1, lat2, lon1, lon2 = map(radians, [lat1, lat2, lon1, lon2])
dlon = lon1 - lon2
dlat = lat1 - lat2
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
b = asin(sqrt(a))
return 2 * b * 6371000
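# Sanity check (illustrative): haversine(0, 0, 0, 1), i.e. two points on the
# equator one degree of longitude apart, returns roughly 111195 m using the
# 6371 km mean Earth radius hard-coded above. Note the argument order is
# (lat1, lat2, lon1, lon2), not the more common (lat1, lon1, lat2, lon2).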
def format_precision(settings, freq=None, level=None,
units=True, fancyUnits=False):
textFreq = None
textLevel = None
if freq is not None:
prec = settings.precisionFreq
width = 4 + prec
textFreq = '{:{width}.{prec}f}'.format(freq, width=width, prec=prec)
if units or fancyUnits:
textFreq += " MHz"
if level is not None:
prec = settings.precisionLevel
width = 4 + prec
textLevel = '{:.{prec}f}'.format(level, width=width, prec=prec)
if fancyUnits:
textLevel += r" $\mathsf{{dB/\sqrt{{Hz}}}}$"
elif units:
textLevel += " dB/Hz"
if textFreq and textLevel:
return (textFreq, textLevel)
if textFreq:
return textFreq
if textLevel:
return textLevel
return None
def format_time(timeStamp, withDate=False):
if timeStamp <= 1:
return 'Unknown'
if withDate:
return time.strftime('%c', time.localtime(timeStamp))
return time.strftime('%H:%M:%S', time.localtime(timeStamp))
def format_iso_time(timeStamp):
dt = datetime.datetime.utcfromtimestamp(timeStamp)
return dt.isoformat() + 'Z'
def set_version_timestamp():
scriptDir = get_script_dir()
timeStamp = str(int(time.time()))
f = open(os.path.join(scriptDir, TIMESTAMP_FILE), 'w')
f.write(timeStamp)
f.close()
def get_version_timestamp(asSeconds=False):
scriptDir = get_script_dir()
f = open(os.path.join(scriptDir, TIMESTAMP_FILE), 'r')
timeStamp = int(f.readline())
f.close()
if asSeconds:
return timeStamp
else:
return format_time(timeStamp, True)
def get_version_timestamp_repo():
f = urllib.urlopen('https://raw.github.com/EarToEarOak/RTLSDR-Scanner/master/src/version-timestamp')
timeStamp = int(f.readline())
f.close()
return timeStamp
def get_serial_ports():
ports = [port[0] for port in serial.tools.list_ports.comports()]
if len(ports) == 0:
if os.name == 'nt':
ports.append('COM1')
else:
ports.append('/dev/ttyS0')
return ports
def limit_to_ascii(text):
return ''.join([i if ord(i) < 128 else '' for i in text])
if __name__ == '__main__':
print 'Please run rtlsdr_scan.py'
exit(1)
| mvdroest/RTLSDR-Scanner | src/misc.py | Python | gpl-3.0 | 6,123 | 0.000327 |
"""
Testing python specific API for Minimizer related classes.
"""
import sys
import os
import unittest
import bornagain as ba
class TestMinimizerHelper:
def __init__(self):
self.m_ncalls = 0
self.m_pars = None
def objective_function(self, pars):
self.m_ncalls += 1
self.m_pars = pars
return 42.0
class MinimizerAPITest(unittest.TestCase):
def test_ParameterAttribute(self):
"""
Testing p.value attribute
"""
par = ba.Parameter("par", 1.0)
self.assertEqual(par.value, 1.0)
par.value = 42.0
self.assertEqual(par.value, 42.0)
def test_ParametersSetIterator(self):
"""
Testing of python iterator over defined fit parameters.
"""
pars = ba.Parameters()
self.assertEqual(pars.size(), 0)
pars.add(ba.Parameter("par0", 1.0, ba.AttLimits.limitless()))
pars.add(ba.Parameter("par1", 2.0, ba.AttLimits.limitless()))
expected_names = ["par0", "par1"]
for index, p in enumerate(pars):
self.assertEqual(p.name(), expected_names[index])
def test_ParametersAdd(self):
"""
Testing Parameters::add method
"""
params = ba.Parameters()
params.add("par0", 0.0)
params.add("par1", 1.0, min=1.0)
params.add("par2", 2.0, max=2.0)
params.add("par3", 3.0, min=1.0, max=2.0)
params.add("par4", 4.0, vary=False)
self.assertTrue(params["par0"].limits().isLimitless())
self.assertTrue(params["par1"].limits().isLowerLimited())
self.assertEqual(params["par1"].limits().lowerLimit(), 1.0)
self.assertTrue(params["par2"].limits().isUpperLimited())
self.assertEqual(params["par2"].limits().upperLimit(), 2.0)
self.assertTrue(params["par3"].limits().isLimited())
self.assertEqual(params["par3"].limits().lowerLimit(), 1.0)
self.assertEqual(params["par3"].limits().upperLimit(), 2.0)
self.assertTrue(params["par4"].limits().isFixed())
def test_SimpleMinimizer(self):
minimizer = ba.Minimizer()
minimizer.setMinimizer("Test")
pars = ba.Parameters()
pars.add(ba.Parameter("par0", 0.0))
pars.add(ba.Parameter("par1", 1.0))
pars.add(ba.Parameter("par2", 2.0))
helper = TestMinimizerHelper()
result = minimizer.minimize(helper.objective_function, pars)
# return value of objective function was propagated to MinimizerResult
self.assertEqual(result.minValue(), 42.0)
        # objective function was called twice
        # (once by the test minimizer, and a second time during return type deduction)
self.assertEqual(helper.m_ncalls, 2)
# starting values of fit parameters were correctly send to objective func
self.assertEqual(list(helper.m_pars.values()), [0.0, 1.0, 2.0])
if __name__ == '__main__':
unittest.main()
| gpospelov/BornAgain | Tests/Functional/PyFit/minimizer_api.py | Python | gpl-3.0 | 2,947 | 0.000679 |
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.http import HttpRequest, HttpResponse
from django.test import modify_settings, override_settings
from django.urls import reverse
from django.utils.translation import gettext_lazy
def add_level_messages(storage):
"""
Add 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super().enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, update that constant also.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super().disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTests:
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS={},
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__, self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return HttpRequest()
def get_response(self):
return HttpResponse()
def get_storage(self, data=None):
"""
Return the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_repr(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(
repr(storage),
f'<{self.storage_class.__qualname__}: request=<HttpRequest>>',
)
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, gettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, messages are properly stored and
retrieved across the full request/redirect/response cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels:
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Messages persist properly when multiple POSTs are made before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertIn('messages', response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
MESSAGE_LEVEL=constants.DEBUG,
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled(self):
"""
When the middleware is disabled, an exception is raised when one
attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
with self.assertRaises(MessageFailure):
self.client.post(add_url, data, follow=True)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled_fail_silently(self):
"""
When the middleware is disabled, an exception is not raised
if 'fail_silently' is True.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertNotIn('messages', response.context)
def stored_messages_count(self, storage, response):
"""
Return the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([
Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2', extra_tags='tag'),
])
def test_existing_read(self):
"""
Reading the existing storage doesn't cause the data to be lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
storage.add(constants.INFO, 'A generic info message', extra_tags=None)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', '', 'extra-tag debug', 'warning', 'error', 'success', 'info'])
def test_level_tag(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.level_tag for msg in storage]
self.assertEqual(tags, ['info', '', 'debug', 'warning', 'error', 'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
})
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| ar4s/django | tests/messages_tests/base.py | Python | bsd-3-clause | 14,187 | 0.000705 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to create the Chrome Updater Installer archive.
This script is used to create an archive of all the files required for a
Chrome Updater install in appropriate directory structure. It reads
updater.release file as input, creates updater.7z ucompressed archive, and
generates the updater.packed.7z compressed archive.
"""
import ConfigParser
import glob
import optparse
import os
import shutil
import subprocess
import sys
# Directory name inside the uncompressed archive where all the files are.
UPDATER_DIR = "bin"
# Suffix to uncompressed full archive file, appended to options.output_name.
ARCHIVE_SUFFIX = ".7z"
# compressed full archive suffix, will be prefixed by options.output_name.
COMPRESSED_ARCHIVE_SUFFIX = ".packed.7z"
TEMP_ARCHIVE_DIR = "temp_installer_archive"
g_archive_inputs = []
def CompressUsingLZMA(build_dir, compressed_file, input_file, verbose):
lzma_exec = GetLZMAExec(build_dir)
cmd = [lzma_exec,
'a', '-t7z',
# Flags equivalent to -mx9 (ultra) but with the bcj2 turned on (exe
         # pre-filter). These arguments are similar to what the Chrome mini
# installer is using.
'-m0=BCJ2',
'-m1=LZMA:d27:fb128',
'-m2=LZMA:d22:fb128:mf=bt2',
'-m3=LZMA:d22:fb128:mf=bt2',
'-mb0:1',
'-mb0s1:2',
'-mb0s2:3',
os.path.abspath(compressed_file),
os.path.abspath(input_file),]
if os.path.exists(compressed_file):
os.remove(compressed_file)
RunSystemCommand(cmd, verbose)
def CopyAllFilesToStagingDir(config, staging_dir, build_dir, timestamp):
"""Copies the files required for installer archive.
"""
CopySectionFilesToStagingDir(config, 'GENERAL', staging_dir, build_dir,
timestamp)
def CopySectionFilesToStagingDir(config, section, staging_dir, src_dir,
timestamp):
"""Copies installer archive files specified in section from src_dir to
staging_dir. This method reads section from config and copies all the
files specified from src_dir to staging dir.
"""
for option in config.options(section):
src_subdir = option.replace('\\', os.sep)
dst_dir = os.path.join(staging_dir, config.get(section, option))
dst_dir = dst_dir.replace('\\', os.sep)
src_paths = glob.glob(os.path.join(src_dir, src_subdir))
if src_paths and not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for src_path in src_paths:
dst_path = os.path.join(dst_dir, os.path.basename(src_path))
if not os.path.exists(dst_path):
g_archive_inputs.append(src_path)
shutil.copy(src_path, dst_dir)
os.utime(dst_path, (os.stat(dst_path).st_atime, timestamp))
os.utime(dst_dir, (os.stat(dst_dir).st_atime, timestamp))
def GetLZMAExec(build_dir):
if sys.platform == 'win32':
lzma_exec = os.path.join(build_dir, "..", "..", "third_party",
"lzma_sdk", "Executable", "7za.exe")
else:
lzma_exec = '7zr' # Use system 7zr.
return lzma_exec
def MakeStagingDirectory(staging_dir):
"""Creates a staging path for installer archive. If directory exists already,
deletes the existing directory.
"""
file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR)
if os.path.exists(file_path):
shutil.rmtree(file_path)
os.makedirs(file_path)
return file_path
def Readconfig(input_file):
"""Reads config information from input file after setting default value of
global variables.
"""
variables = {}
variables['UpdaterDir'] = UPDATER_DIR
config = ConfigParser.SafeConfigParser(variables)
config.read(input_file)
return config
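# Illustrative sketch of an updater.release file (the entries below are made
# up; the real file ships with the updater source). Each option name is a glob
# relative to the build directory and its value is the destination directory
# inside the staging area; %(UpdaterDir)s expands to the "bin" directory
# defined at the top of this script.
#
#     [GENERAL]
#     updater.exe: %(UpdaterDir)s
#     *.dll: %(UpdaterDir)s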
def RunSystemCommand(cmd, verbose):
"""Runs |cmd|, prints the |cmd| and its output if |verbose|; otherwise
captures its output and only emits it on failure.
"""
if verbose:
print 'Running', cmd
try:
# Run |cmd|, redirecting stderr to stdout in order for captured errors to be
# inline with corresponding stdout.
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if verbose:
print output
except subprocess.CalledProcessError as e:
raise Exception("Error while running cmd: %s\n"
"Exit code: %s\n"
"Command output:\n%s" %
(e.cmd, e.returncode, e.output))
def CreateArchiveFile(options, staging_dir, timestamp):
"""Creates a new installer archive file after deleting any existing old file.
"""
# First create an uncompressed archive file for the current build (updater.7z)
lzma_exec = GetLZMAExec(options.build_dir)
archive_file = os.path.join(options.output_dir,
options.output_name + ARCHIVE_SUFFIX)
if options.depfile:
# If a depfile was requested, do the glob of the staging dir and generate
# a list of dependencies in .d format. We list the files that were copied
# into the staging dir, not the files that are actually in the staging dir
# because the ones in the staging dir will never be edited, and we want
# to have the build be triggered when the thing-that-was-copied-there
# changes.
def PathFixup(path):
"""Fixes path for depfile format: backslash to forward slash, and
backslash escaping for spaces."""
return path.replace('\\', '/').replace(' ', '\\ ')
# Gather the list of files in the staging dir that will be zipped up. We
# only gather this list to make sure that g_archive_inputs is complete (i.e.
    # that no file copies got missed).
staging_contents = []
    for root, _, files in os.walk(os.path.join(staging_dir, UPDATER_DIR)):
for filename in files:
staging_contents.append(PathFixup(os.path.join(root, filename)))
# Make sure there's an archive_input for each staging dir file.
for staging_file in staging_contents:
for archive_input in g_archive_inputs:
archive_rel = PathFixup(archive_input)
if (os.path.basename(staging_file).lower() ==
os.path.basename(archive_rel).lower()):
break
else:
raise Exception('Did not find an archive input file for "%s"' %
staging_file)
# Finally, write the depfile referencing the inputs.
with open(options.depfile, 'wb') as f:
f.write(PathFixup(os.path.relpath(archive_file, options.build_dir)) +
': \\\n')
f.write(' ' + ' \\\n '.join(PathFixup(x) for x in g_archive_inputs))
# It is important to use abspath to create the path to the directory because
# if you use a relative path without any .. sequences then 7za.exe uses the
# entire relative path as part of the file paths in the archive. If you have
# a .. sequence or an absolute path then only the last directory is stored as
# part of the file paths in the archive, which is what we want.
cmd = [lzma_exec,
'a',
'-t7z',
archive_file,
os.path.abspath(os.path.join(staging_dir, UPDATER_DIR)),
'-mx0',]
  # There does not seem to be any way in 7za.exe to overwrite an existing file, so
# we always delete before creating a new one.
if not os.path.exists(archive_file):
RunSystemCommand(cmd, options.verbose)
elif options.skip_rebuild_archive != "true":
os.remove(archive_file)
RunSystemCommand(cmd, options.verbose)
# Do not compress the archive when skip_archive_compression is specified.
if options.skip_archive_compression:
compressed_file = os.path.join(
options.output_dir, options.output_name + COMPRESSED_ARCHIVE_SUFFIX)
if os.path.exists(compressed_file):
os.remove(compressed_file)
return os.path.basename(archive_file)
compressed_archive_file = options.output_name + COMPRESSED_ARCHIVE_SUFFIX
compressed_archive_file_path = os.path.join(options.output_dir,
compressed_archive_file)
os.utime(archive_file, (os.stat(archive_file).st_atime, timestamp))
CompressUsingLZMA(options.build_dir, compressed_archive_file_path,
archive_file, options.verbose)
return compressed_archive_file
_RESOURCE_FILE_HEADER = """\
// This file is automatically generated by create_installer_archive.py.
// It contains the resource entries that are going to be linked inside the exe.
// For each file to be linked there should be two lines:
// - The first line contains the output filename (without path) and the
// type of the resource ('BN' - not compressed , 'BL' - LZ compressed,
// 'B7' - LZMA compressed)
// - The second line contains the path to the input file. Uses '/' to
// separate path components.
"""
def CreateResourceInputFile(
output_dir, archive_file, resource_file_path,
component_build, staging_dir):
"""Creates resource input file for installer target."""
# An array of (file, type, path) tuples of the files to be included.
resources = [(archive_file, 'B7',
os.path.join(output_dir, archive_file))]
with open(resource_file_path, 'w') as f:
f.write(_RESOURCE_FILE_HEADER)
for (file, type, path) in resources:
f.write('\n%s %s\n "%s"\n' % (file, type, path.replace("\\","/")))
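# For instance (illustrative path), packaging out/updater.packed.7z would append
# the following entry to the generated resource input file:
#
#     updater.packed.7z B7
#      "out/updater.packed.7z"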
def ParseDLLsFromDeps(build_dir, runtime_deps_file):
"""Parses the runtime_deps file and returns the set of DLLs in it, relative
to build_dir."""
build_dlls = set()
args = open(runtime_deps_file).read()
for l in args.splitlines():
if os.path.splitext(l)[1] == ".dll":
build_dlls.add(os.path.join(build_dir, l))
return build_dlls
# Copies component build DLLs for the setup to be able to find those DLLs at
# run-time.
# This is meant for developer builds only and should never be used to package
# an official build.
def DoComponentBuildTasks(staging_dir, build_dir, setup_runtime_deps):
installer_dir = os.path.join(staging_dir, UPDATER_DIR)
if not os.path.exists(installer_dir):
os.mkdir(installer_dir)
setup_component_dlls = ParseDLLsFromDeps(build_dir, setup_runtime_deps)
for setup_component_dll in setup_component_dlls:
g_archive_inputs.append(setup_component_dll)
shutil.copy(setup_component_dll, installer_dir)
def main(options):
"""Main method that reads input file, creates archive file and writes
resource input file.
"""
config = Readconfig(options.input_file)
staging_dir = MakeStagingDirectory(options.staging_dir)
# Copy the files from the build dir.
CopyAllFilesToStagingDir(config, staging_dir, options.build_dir,
options.timestamp)
if options.component_build == '1':
DoComponentBuildTasks(staging_dir, options.build_dir,
options.setup_runtime_deps)
# Name of the archive file built (for example - updater.7z)
archive_file = CreateArchiveFile(options, staging_dir, options.timestamp)
CreateResourceInputFile(options.output_dir,
archive_file, options.resource_file_path,
options.component_build == '1', staging_dir)
def _ParseOptions():
parser = optparse.OptionParser()
parser.add_option('-i', '--input_file',
help='Input file describing which files to archive.')
parser.add_option('-b', '--build_dir',
help='Build directory. The paths in input_file are relative to this.')
parser.add_option('--staging_dir',
help='Staging directory where intermediate files and directories '
'will be created')
parser.add_option('-o', '--output_dir',
help='The output directory where the archives will be written. '
'Defaults to the build_dir.')
parser.add_option('--resource_file_path',
help='The path where the resource file will be output. ')
parser.add_option('-s', '--skip_rebuild_archive',
default="False", help='Skip re-building updater.7z archive if it exists.')
parser.add_option('-n', '--output_name', default='updater',
help='Name used to prefix names of generated archives.')
parser.add_option('--component_build', default='0',
help='Whether this archive is packaging a component build.')
parser.add_option('--skip_archive_compression',
action='store_true', default=False,
help='Turn off compression of updater.7z into updater.packed.7z and '
'helpfully delete any old updater.packed.7z in |output_dir|.')
parser.add_option('--depfile',
help='Generate a depfile with the given name listing the implicit inputs '
'to the archive process that can be used with a build system.')
parser.add_option('--setup_runtime_deps',
help='A file listing runtime dependencies for setup.exe. This will be '
'used to get a list of DLLs to archive in a component build.')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False)
parser.add_option(
'--timestamp',
type='int',
help='Timestamp to set archive entry modified times to.')
options, _ = parser.parse_args()
if not options.build_dir:
parser.error('You must provide a build dir.')
options.build_dir = os.path.normpath(options.build_dir)
if not options.staging_dir:
parser.error('You must provide a staging dir.')
if not options.input_file:
parser.error('You must provide an input file')
is_component_build = options.component_build == '1'
if is_component_build and not options.setup_runtime_deps:
parser.error("updater_runtime_deps must be specified for a component build")
if not options.output_dir:
options.output_dir = options.build_dir
return options
if '__main__' == __name__:
options = _ParseOptions()
if options.verbose:
print sys.argv
sys.exit(main(options))
| endlessm/chromium-browser | chrome/updater/win/installer/create_installer_archive.py | Python | bsd-3-clause | 13,828 | 0.011065 |
""""
Meaningful Scales Detection: an Unsupervised Noise Detection Algorithm for \
Digital Contours
Demo Editor: B. Kerautret
"""
from lib import base_app, build, http, image, config
from lib.misc import app_expose, ctime
from lib.base_app import init_app
import cherrypy
from cherrypy import TimeoutError
import os.path
import shutil
import time
class app(base_app):
""" template demo app """
title = "Meaningful Scales Detection: an Unsupervised Noise "+\
"Detection Algorithm for Digital Contours"
xlink_article = 'http://www.ipol.im/pub/pre/75/'
xlink_src = 'http://www.ipol.im/pub/pre/75/meaningfulscaleDemo.tgz'
demo_src_filename = 'meaningfulscaleDemo.tgz'
demo_src_dir = 'meaningfulscaleDemo'
input_nb = 1 # number of input images
input_max_pixels = 4096 * 4096 # max size (in pixels) of an input image
input_max_weight = 1 * 4096 * 4096 # max size (in bytes) of an input file
input_dtype = '3x8i' # input image expected data type
input_ext = '.png' # input image expected extension (ie file format)
is_test = False # switch to False for deployment
list_commands = []
def __init__(self):
"""
app setup
"""
# setup the parent class
base_dir = os.path.dirname(os.path.abspath(__file__))
base_app.__init__(self, base_dir)
# select the base_app steps to expose
# index() is generic
app_expose(base_app.index)
app_expose(base_app.input_select)
app_expose(base_app.input_upload)
# params() is modified from the template
app_expose(base_app.params)
# run() and result() must be defined here
def build(self):
"""
program build/update
"""
# store common file path in variables
tgz_file = self.dl_dir + self.demo_src_filename
prog_names = ["meaningfulScaleEstim"]
script_names = ["applyMS.sh", "convert.sh", "convertFig.sh", \
"transformBG.sh"]
prog_bin_files = []
for f in prog_names:
prog_bin_files.append(self.bin_dir+ f)
log_file = self.base_dir + "build.log"
# get the latest source archive
build.download(self.xlink_src, tgz_file)
# test if the dest file is missing, or too old
if (os.path.isfile(prog_bin_files[0])
and ctime(tgz_file) < ctime(prog_bin_files[0])):
cherrypy.log("not rebuild needed",
context='BUILD', traceback=False)
else:
# extract the archive
build.extract(tgz_file, self.src_dir)
# build the program
build.run("mkdir %s;" %(self.src_dir+ self.demo_src_dir+"/build"), \
stdout=log_file)
build.run("cd %s; cmake .. -DCMAKE_BUILD_TYPE=Release \
-DBUILD_TESTING=false ; make -j 4" %(self.src_dir+ \
self.demo_src_dir+\
"/build"),
stdout=log_file)
# save into bin dir
if os.path.isdir(self.bin_dir):
shutil.rmtree(self.bin_dir)
os.mkdir(self.bin_dir)
shutil.copy(self.src_dir + self.demo_src_dir + \
"/build/demoIPOL/meaningfulScaleEstim", self.bin_dir)
for f in script_names :
shutil.copy(self.src_dir + os.path.join(self.demo_src_dir, \
"demoIPOL", f), self.bin_dir)
# copy annex file : pgm2freeman (extraction of contours)
shutil.copy(self.src_dir + self.demo_src_dir+ \
"/build/bin/pgm2freeman", self.bin_dir)
# copy Dynamic lib
shutil.copy(self.src_dir + self.demo_src_dir+ \
"/build/src/libImaGene.so", self.bin_dir)
# cleanup the source dir
shutil.rmtree(self.src_dir)
return
@cherrypy.expose
@init_app
def input_select(self, **kwargs):
"""
use the selected available input images
"""
self.init_cfg()
#kwargs contains input_id.x and input_id.y
input_id = kwargs.keys()[0].split('.')[0]
assert input_id == kwargs.keys()[1].split('.')[0]
# get the images
input_dict = config.file_dict(self.input_dir)
fnames = input_dict[input_id]['files'].split()
for i in range(len(fnames)):
shutil.copy(self.input_dir + fnames[i],
self.work_dir + 'input_%i' % i)
msg = self.process_input()
self.log("input selected : %s" % input_id)
self.cfg['meta']['original'] = False
self.cfg.save()
# jump to the params page
return self.params(msg=msg, key=self.key)
#---------------------------------------------------------------------------
# Parameter handling (an optional crop).
#---------------------------------------------------------------------------
@cherrypy.expose
@init_app
def params(self, newrun=False, msg=None):
"""Parameter handling (optional crop)."""
# if a new experiment on the same image, clone data
if newrun:
self.clone_input()
# save the input image as 'input_0_selection.png', the one to be used
img = image(self.work_dir + 'input_0.png')
img.save(self.work_dir + 'input_0_selection.png')
img.save(self.work_dir + 'input_0_selection.pgm')
# initialize subimage parameters
self.cfg['param'] = {'x1':-1, 'y1':-1, 'x2':-1, 'y2':-1}
self.cfg.save()
return self.tmpl_out('params.html')
@cherrypy.expose
@init_app
def wait(self, **kwargs):
"""
params handling and run redirection
"""
# save and validate the parameters
# handle image crop if used
if not 'action' in kwargs:
# read click coordinates
x = kwargs['click.x']
y = kwargs['click.y']
x1 = self.cfg['param']['x1']
y1 = self.cfg['param']['y1']
img = image(self.work_dir + 'input_0.png')
# check if the click is inside the image
if int(x) >= 0 and int(y) >= 0 and \
int(x) < img.size[0] and int(y) < img.size[1]:
if int(x1) < 0 or int(y1) < 0 : # first click
# update (x1,y1)
self.cfg['param']['x1'] = int(x)
self.cfg['param']['y1'] = int(y)
self.cfg.save()
# draw cross
img.convert('3x8i')
img.draw_cross((int(x), int(y)), size=9, color="red")
img.save(self.work_dir + 'input_0_selection.png')
elif int(x1) != int(x) and int(y1) != int(y) : # second click
# update (x2,y2)
self.cfg['param']['x2'] = int(x)
self.cfg['param']['y2'] = int(y)
self.cfg.save()
# order points such that (x1,y1) is the lower left corner
(x1, x2) = sorted((int(x1), int(x)))
(y1, y2) = sorted((int(y1), int(y)))
assert (x2 - x1) > 0 and (y2 - y1) > 0
# crop the image
img.crop((x1, y1, x2+1, y2+1))
img.save(self.work_dir + 'input_0_selection.png')
img.save(self.work_dir + 'input_0_selection.pgm')
return self.tmpl_out('params.html')
try:
self.cfg['param'] = {'tmax' : float(kwargs['tmax']),
'm' : float(kwargs['m'])}
except ValueError:
return self.error(errcode='badparams',
errmsg="The parameters must be numeric.")
self.cfg['param']['autothreshold'] = kwargs['thresholdtype'] == 'True'
http.refresh(self.base_url + 'run?key=%s' % self.key)
return self.tmpl_out("wait.html")
@cherrypy.expose
@init_app
def run(self):
"""
algo execution
"""
self.list_commands = ""
# read the parameters
t = self.cfg['param']['tmax']
m = self.cfg['param']['m']
autothreshold = self.cfg['param']['autothreshold']
# run the algorithm
try:
self.run_algo({'t':t, 'm':m, 'autothreshold':autothreshold})
except TimeoutError:
return self.error(errcode='timeout')
except RuntimeError:
return self.error(errcode='runtime')
except ValueError:
return self.error(errcode='badparams',
errmsg="The parameters given produce no contours,\
please change them.")
http.redir_303(self.base_url + 'result?key=%s' % self.key)
# archive
if self.cfg['meta']['original']:
ar = self.make_archive()
ar.add_file("input_0.png", "original.png", info="uploaded")
ar.add_file("input_0_selection.png","selection.png")
ar.add_file("resu.png", info="output")
ar.add_file("noiseLevels.txt", info="noise levels")
ar.add_file("inputContourFC.txt", info="polygon input")
ar.add_file("commands.txt", info="commands")
ar.add_file("resu.eps", info="result in eps format")
ar.add_info({"threshold auto": autothreshold})
ar.add_info({"threshold tmax": self.cfg['param']['tmax']})
ar.add_info({"contour min size m": m})
try:
version_file = open(self.work_dir + "version.txt", "w")
p = self.run_proc(["meaningfulScaleEstim", "-version"], \
stdout=version_file, \
env={'LD_LIBRARY_PATH' : self.bin_dir})
self.wait_proc(p)
version_file.close()
version_file = open(self.work_dir + "version.txt", "r")
version_info = version_file.readline()
version_file.close()
except Exception:
version_info = "unknown"
ar.add_info({"meaningfulScaleEstim version " : version_info})
ar.add_info({"#contours" : self.cfg['info']['num_contours']})
ar.add_info({"run time (s)" : self.cfg['info']['run_time']})
ar.save()
return self.tmpl_out("run.html")
def run_algo(self, params):
"""
        The core algorithm runner.
        Can also be called by a batch processor; expects a params dict
        with the keys 't', 'm' and 'autothreshold'.
"""
# t, m, autothreshold
self.cfg['param']['sizex'] = image(self.work_dir + \
'input_0.png').size[0]
self.cfg['param']['sizey'] = image(self.work_dir + \
'input_0.png').size[1]
## -------
## process 1: transform input file
## ---------
command_args = ['/usr/bin/convert', 'input_0_selection.png', \
'input_0_selection.pgm' ]
self.runCommand(command_args)
## -------
## process 2: Extract 2D contours
## ---------
command_args = ['pgm2freeman']
if not params['autothreshold']:
command_args += ['-threshold', str(params['t']) ]
command_args += ['-min_size', str(params['m']) ]
fInput = open(self.work_dir+'input_0_selection.pgm', "r")
f = open(self.work_dir+'inputContour.txt', "w")
fInfo = open(self.work_dir+'info.txt', "w")
cntExtractionCmd = self.runCommand(command_args, stdIn=fInput, \
stdOut=f, stdErr=fInfo, \
comp = ' < input_0.pgm > inputContour.txt')
fInput.close()
f.close()
fInfo.close()
sizeContour = os.path.getsize(self.work_dir+"inputContour.txt")
if sizeContour == 0 :
raise ValueError
        # Recover the Otsu threshold (tmax) reported by pgm2freeman in the log
fInfo = open(self.work_dir+'info.txt', "r")
if self.cfg['param']['autothreshold']:
lines = fInfo.readlines()
line_cases = lines[0].split('=')
self.cfg['param']['tmax'] = float(line_cases[1])
fInfo.close()
self.commentsResultContourFile(cntExtractionCmd, self.work_dir+\
'inputContourFC.txt')
## -------
## process 3: Convert background image
## ---------
command_args = ['/usr/bin/convert', '-brightness-contrast', '40x-40' ]
command_args += ['input_0_selection.png', 'input_0BG.png']
self.runCommand(command_args)
## -------
        ## process 4: estimate the noise level of each contour
## ---------
foutput = open(self.work_dir+'noiseLevels.txt', "w")
fLog = open(self.work_dir+'logMS.txt', "w")
fInput = open(self.work_dir+'inputContour.txt', "r")
command_args = ['meaningfulScaleEstim', '-enteteXFIG']+\
['-drawXFIGNoiseLevel', '-setFileNameFigure']+\
['noiseLevel.fig', '-drawContourSRC', '4', '1']+\
['-afficheImage', 'input_0BG.png']+\
[str(image(self.work_dir + 'input_0BG.png').size[0])] +\
[str(image(self.work_dir + 'input_0BG.png').size[1])] +\
['-setPosImage', '1', '1', '-printNoiseLevel'] + \
['-processAllContours']
try:
self.cfg['info']['run_time'] = time.time()
num_lines = sum(1 for line in open(self.work_dir + \
'inputContour.txt'))
self.cfg['info']['num_contours'] = num_lines
self.runCommand(command_args, stdIn=fInput, stdOut=foutput, \
stdErr=fLog,\
comp="< inputContour.txt > noiseLevels.txt")
except (OSError, RuntimeError):
fLog.write("Some contours were not processed.")
self.cfg['info']['run_time'] = time.time() - \
self.cfg['info']['run_time']
fInput.close()
fLog.close()
p = self.run_proc(['convertFig.sh','noiseLevel.fig'])
self.wait_proc(p, timeout=self.timeout)
## ----
## Final step: save command line
## ----
fcommands = open(self.work_dir+"commands.txt", "w")
fcommands.write(self.list_commands)
fcommands.close()
return
@cherrypy.expose
@init_app
def result(self, public=None):
"""
display the algo results
"""
resultHeight = image(self.work_dir + 'input_0_selection.png').size[1]
imageHeightResized = min (600, resultHeight)
resultHeight = max(200, resultHeight)
return self.tmpl_out("result.html", height=resultHeight, \
heightImageDisplay=imageHeightResized, \
width=image(self.work_dir\
+'input_0_selection.png').size[0])
def runCommand(self, command, stdIn=None, stdOut=None, stdErr=None, \
comp=None, outFileName=None):
"""
Run command and update the attribute list_commands
"""
p = self.run_proc(command, stdin=stdIn, stderr=stdErr, stdout=stdOut, \
env={'LD_LIBRARY_PATH' : self.bin_dir})
self.wait_proc(p, timeout=self.timeout)
index = 0
        # replace "convert.sh" with the equivalent classic program name ("convert")
for arg in command:
if arg == "convert.sh" :
command[index] = "convert"
index = index + 1
command_to_save = ' '.join(['"' + arg + '"' if ' ' in arg else arg
for arg in command ])
if comp is not None:
command_to_save += comp
if outFileName is not None:
command_to_save += ' > ' + outFileName
self.list_commands += command_to_save + '\n'
return command_to_save
def commentsResultContourFile(self, command, fileStrContours):
"""
        Add comments to the resulting contour file (the command line that
        produced it and file-format information).
"""
contoursList = open (self.work_dir+"tmp.dat", "w")
contoursList.write("# Set of resulting contours obtained from the " +\
"pgm2freeman algorithm. \n")
contoursList.write( "# Each line corresponds to a digital " + \
"contour " + \
" given with the first point of the digital "+ \
"contour followed by its freeman code "+ \
"associated to each move from a point to "+ \
"another (4 connected: code 0, 1, 2, and 3).\n")
contoursList.write( "# Command to reproduce the result of the "+\
"algorithm:\n")
contoursList.write("# "+ command+'\n \n')
f = open (self.work_dir+'inputContour.txt', "r")
index = 0
for line in f:
contoursList.write("# contour number: "+ str(index) + "\n")
contoursList.write(line+"\n")
index = index +1
contoursList.close()
f.close()
shutil.copy(self.work_dir+'tmp.dat', fileStrContours)
os.remove(self.work_dir+'tmp.dat')
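# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original demo): driving the same
# binaries outside of the CherryPy app, e.g. for batch processing. It assumes
# pgm2freeman and meaningfulScaleEstim were built into bin_dir as done in
# build() above, that bin_dir/work_dir end with '/', and it only passes the
# core flags used by run_algo() (the figure-generation options are omitted);
# the default threshold/min_size values are arbitrary.
# ---------------------------------------------------------------------------
def run_pipeline_standalone(bin_dir, work_dir, threshold=128, min_size=20):
    """Extract contours from work_dir/input_0_selection.pgm and estimate
    their noise levels into work_dir/noiseLevels.txt."""
    import subprocess
    env = {'LD_LIBRARY_PATH': bin_dir}
    # step 1: extract the digital contours from the PGM image
    with open(work_dir + 'input_0_selection.pgm', 'rb') as pgm, \
         open(work_dir + 'inputContour.txt', 'wb') as contours:
        subprocess.check_call([bin_dir + 'pgm2freeman',
                               '-threshold', str(threshold),
                               '-min_size', str(min_size)],
                              stdin=pgm, stdout=contours, env=env)
    # step 2: estimate the meaningful scales / noise level of each contour
    with open(work_dir + 'inputContour.txt', 'rb') as contours, \
         open(work_dir + 'noiseLevels.txt', 'wb') as noise:
        subprocess.check_call([bin_dir + 'meaningfulScaleEstim',
                               '-printNoiseLevel', '-processAllContours'],
                              stdin=contours, stdout=noise, env=env)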
| juan-cardelino/matlab_demos | ipol_demo-light-1025b85/app_available/75/app.py | Python | gpl-2.0 | 17,698 | 0.00825 |
import json
import os
import datetime
for i in range(9):
try:
os.chdir('../call_record/')
with open('callsSortedperson_'+str(i)+'.json','r') as f:
data=json.load(f)
print 'User: ',i
#print len(data)
friends=[]
for j in range(len(data)):
#print data[j][0]
friends.append(data[j][0])
friends=list(set(friends))
#print len(friends)
#24 hour loop
time=data[0][2]
t=datetime.datetime.fromtimestamp(data[0][2]).strftime("%Y-%m-%d %H:%M:%S") #IST
#print t
calls=[]
for k in friends:
c=0
for j in range(len(data)):
if data[j][1]==2:
                    # count every outgoing call to this contact (the 24-hour window check below is disabled)
if k==data[j][0]:
#if data[j][2]<=(float(time)+86400):
t=datetime.datetime.fromtimestamp(data[j][2]).strftime("%Y-%m-%d %H:%M:%S") #IST
#print t
c+=1
#print c,k
calls.append(c)
#print len(calls)
k=[]
c=0
for j in range(len(friends)):
k.append(j+1)
if calls[j]==0:
c+=1
print c
#print zip(k, calls)
f=open('#CallsVsContacts'+str(i)+'.json','w')
json.dump(zip(k,calls),f,indent=2)
except Exception as e:
continue
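# --------------------------------------------------------------------------
# Illustrative alternative (not part of the original script): the same
# outgoing-call count per contact can be computed directly with
# collections.Counter. It assumes the record layout used above:
# record[0] = contact id, record[1] = call type (2 == outgoing),
# record[2] = unix timestamp.
# --------------------------------------------------------------------------
def count_outgoing_calls(records):
    """Return {contact_id: outgoing_call_count}, keeping contacts that
    never placed an outgoing call with a count of 0."""
    from collections import Counter
    counts = Counter(r[0] for r in records if r[1] == 2)
    for r in records:
        counts.setdefault(r[0], 0)
    return dict(counts)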
| shreya2111/Recommender-System | outgoing_call_count.py | Python | gpl-2.0 | 1,175 | 0.095319 |
#!/usr/bin/python
# ===========================================================================
# Hatalogico KNIGHT RIDER for 16 LEDs - powered by Adafruit's Libraries
# -------------------------------------------------
# Date: 12/4/2015
# Author: John Lumley
#
# BIG THANKS TO ADAFRUIT INDUSTRIES FOR MAKING THIS POSSIBLE AND EASY
# ===========================================================================
import time, os, sys
# DETERMINE CURRENT PATH
scriptPath = os.path.realpath(os.path.dirname(sys.argv[0]))
os.chdir(scriptPath)
# APPEND FOLDER OF REQUIRED LIBRARY
sys.path.append("Adafruit/Adafruit_PWM_Servo_Driver")
# FINALLY LOAD THE LIBRARY
from Adafruit_PWM_Servo_Driver import PWM
LED_PIN_0 = 0
LED_PIN_1 = 2
LED_PIN_2 = 4
LED_PIN_3 = 6
LED_PIN_4 = 8
LED_PIN_5 = 10
LED_PIN_6 = 12
LED_PIN_7 = 14
LED_PIN_8 = 1
LED_PIN_9 = 3
LED_PIN_10 = 5
LED_PIN_11 = 7
LED_PIN_12 = 9
LED_PIN_13 = 11
LED_PIN_14 = 13
LED_PIN_15 = 15
BRIGHT_0=0
BRIGHT_1=1800
BRIGHT_2=2400
BRIGHT_3=3000
BRIGHT_4=3500
BRIGHT_5=3800
BRIGHT_6=4000
BRIGHT_7=4095
# BUILD 42 FRAMES OF 16 CHANNEL BRIGHTNESS VALUES FOR THE PWM LOOP
position = {}
position[0] = [ BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[1] = [ BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[2] = [ BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[3] = [ BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[4] = [ BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[5] = [ BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[6] = [ BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[7] = [ BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[8] = [ BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[9] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[10] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[11] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[12] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[13] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7 ]
position[14] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7 ]
position[15] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0 ]
position[16] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_0 ]
position[17] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_0 ]
position[18] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_0 ]
position[19] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_0 ]
position[20] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_0 ]
position[21] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0 ]
position[22] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1 ]
position[23] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2 ]
position[24] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3 ]
position[25] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4 ]
position[26] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5 ]
position[27] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6 ]
position[28] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7 ]
position[29] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7 ]
position[30] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[31] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[32] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[33] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[34] = [ BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[35] = [ BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[36] = [ BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[37] = [ BRIGHT_0, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[38] = [ BRIGHT_0, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[39] = [ BRIGHT_0, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[40] = [ BRIGHT_0, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[41] = [ BRIGHT_0, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
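# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): a similar sweep
# table can be generated programmatically instead of being written by hand.
# The result approximates the rows above (a BRIGHT_0 head bouncing between
# channel 0 and 15 with a fading trail); it is not a byte-for-byte
# reproduction of the hand-tuned values.
# --------------------------------------------------------------------------
BRIGHTNESS_RAMP = [BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3,
                   BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7]

def build_sweep_table(num_channels=16):
    # head positions: sweep right 0..15, then back 15..0
    heads = list(range(num_channels)) + list(range(num_channels - 1, -1, -1))
    table = []
    for step, head in enumerate(heads):
        direction = 1 if step < num_channels else -1
        frame = []
        for channel in range(num_channels):
            # how far this channel lags behind the moving head
            behind = (head - channel) * direction
            if 0 <= behind < len(BRIGHTNESS_RAMP):
                frame.append(BRIGHTNESS_RAMP[behind])
            else:
                frame.append(BRIGHT_7)  # off
        table.append(frame)
    return table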
# SETUP THE PWM DRIVER (I2C ADDRESS IS 70 BY DEFAULT ON HATALOGICO)
# EACH CHANNEL RANGES FROM 0 (off) TO 4095 (on)
pwm = PWM(0x70)
# SET FREQUENCY
pwm.setPWMFreq(120)
while (True):
for posNo in range(0, 42, +1):
pwm.setPWM(LED_PIN_0, 0, position[ posNo ][0])
pwm.setPWM(LED_PIN_1, 0, position[ posNo ][1])
pwm.setPWM(LED_PIN_2, 0, position[ posNo ][2])
pwm.setPWM(LED_PIN_3, 0, position[ posNo ][3])
pwm.setPWM(LED_PIN_4, 0, position[ posNo ][4])
pwm.setPWM(LED_PIN_5, 0, position[ posNo ][5])
pwm.setPWM(LED_PIN_6, 0, position[ posNo ][6])
pwm.setPWM(LED_PIN_7, 0, position[ posNo ][7])
pwm.setPWM(LED_PIN_8, 0, position[ posNo ][8])
pwm.setPWM(LED_PIN_9, 0, position[ posNo ][9])
pwm.setPWM(LED_PIN_10, 0, position[ posNo ][10])
pwm.setPWM(LED_PIN_11, 0, position[ posNo ][11])
pwm.setPWM(LED_PIN_12, 0, position[ posNo ][12])
pwm.setPWM(LED_PIN_13, 0, position[ posNo ][13])
pwm.setPWM(LED_PIN_14, 0, position[ posNo ][14])
pwm.setPWM(LED_PIN_15, 0, position[ posNo ][15])
# HAVE A LITTLE NAP
time.sleep(0.015)
| Jelby/Hatalogico | ledKnightRider16.py | Python | mit | 9,635 | 0.019512 |
callback_classes = [
['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::MobilityModel>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'std::list<ns3::Ptr<ns3::LteControlMessage>, std::allocator<ns3::Ptr<ns3::LteControlMessage> > >', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'ns3::Ptr<ns3::SpectrumValue>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::DlInfoListElement_s', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::UlInfoListElement_s', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::PacketBurst>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::PhyReceptionStatParameters', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::PhyTransmissionStatParameters', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'ns3::LteUePhy::State', 'ns3::LteUePhy::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'double', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'double', 'bool', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Packet>', 'const ns3::Address &', 'const ns3::Address &', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::EpcUeNas::State', 'ns3::EpcUeNas::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'const ns3::SpectrumValue &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned char', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned char', 'unsigned int', 'unsigned long', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::UeManager::State', 'ns3::UeManager::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::LteRrcSap::MeasurementReport', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::DlSchedulingCallbackInfo', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned int', 'unsigned int', 'unsigned short', 'unsigned char', 'unsigned short', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::LteUeRrc::State', 'ns3::LteUeRrc::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| pradeepnazareth/NS-3-begining | src/lte/bindings/callbacks_list.py | Python | gpl-2.0 | 6,324 | 0.006167 |
from django.shortcuts import render
from django.utils.translation import activate
def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# context = {'latest_question_list': latest_question_list}
# activate('pt-br')
# print(request.LANGUAGE_CODE)
context = {}
return render(request, 'index.html', context)
| torchmed/torchmed | torchmed/views.py | Python | mit | 364 | 0.002747 |
########################################################################
# MCU Gear(R) system Sample Code
# Author: y.kou.
# web site: http://www.milletool.com/
# Date : 8/OCT/2016
#
########################################################################
#Revision Information
#
########################################################################
#!/usr/bin/python
from milpy import mil
from milpy import milMod
from milpy import wiringdata
from milpy import Moisuture
import time
wiringdata.initIO()
modA = milMod.milMod(Moisuture.getInfo(0))
if __name__=='__main__':
try:
while(1):
modA.connect()
readData = Moisuture.read(modA)
print "readData = ",readData
time.sleep(1)
modA.disconnect()
except KeyboardInterrupt:
print("detect key interrupt [ctrl]+ [C] \n")
mil.cleanup()
wiringdata.cleanup()
| yoshinarikou/MilleFeuilleRaspberryPi | milpython/MoistureTest.py | Python | mit | 835 | 0.027545 |
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import random
import functools
import logging
from binascii import crc32
from botocore.vendored.requests import ConnectionError, Timeout
from botocore.vendored.requests.packages.urllib3.exceptions import ClosedPoolError
from botocore.exceptions import ChecksumError, EndpointConnectionError
logger = logging.getLogger(__name__)
# The only supported error for now is GENERAL_CONNECTION_ERROR
# which maps to requests generic ConnectionError. If we're able
# to get more specific exceptions from requests we can update
# this mapping with more specific exceptions.
EXCEPTION_MAP = {
'GENERAL_CONNECTION_ERROR': [
ConnectionError, ClosedPoolError, Timeout,
EndpointConnectionError
],
}
def delay_exponential(base, growth_factor, attempts):
"""Calculate time to sleep based on exponential function.
The format is::
base * growth_factor ^ (attempts - 1)
If ``base`` is set to 'rand' then a random number between
0 and 1 will be used as the base.
Base must be greater than 0, otherwise a ValueError will be
raised.
"""
if base == 'rand':
base = random.random()
elif base <= 0:
raise ValueError("The 'base' param must be greater than 0, "
"got: %s" % base)
time_to_sleep = base * (growth_factor ** (attempts - 1))
return time_to_sleep
def create_exponential_delay_function(base, growth_factor):
"""Create an exponential delay function based on the attempts.
This is used so that you only have to pass it the attempts
parameter to calculate the delay.
"""
return functools.partial(
delay_exponential, base=base, growth_factor=growth_factor)
def create_retry_handler(config, operation_name=None):
checker = create_checker_from_retry_config(
config, operation_name=operation_name)
action = create_retry_action_from_config(
config, operation_name=operation_name)
return RetryHandler(checker=checker, action=action)
def create_retry_action_from_config(config, operation_name=None):
# The spec has the possibility of supporting per policy
# actions, but right now, we assume this comes from the
# default section, which means that delay functions apply
# for every policy in the retry config (per service).
delay_config = config['__default__']['delay']
if delay_config['type'] == 'exponential':
return create_exponential_delay_function(
base=delay_config['base'],
growth_factor=delay_config['growth_factor'])
def create_checker_from_retry_config(config, operation_name=None):
checkers = []
max_attempts = None
retryable_exceptions = []
if '__default__' in config:
policies = config['__default__'].get('policies', [])
max_attempts = config['__default__']['max_attempts']
for key in policies:
current_config = policies[key]
checkers.append(_create_single_checker(current_config))
retry_exception = _extract_retryable_exception(current_config)
if retry_exception is not None:
retryable_exceptions.extend(retry_exception)
if operation_name is not None and config.get(operation_name) is not None:
operation_policies = config[operation_name]['policies']
for key in operation_policies:
checkers.append(_create_single_checker(operation_policies[key]))
retry_exception = _extract_retryable_exception(
operation_policies[key])
if retry_exception is not None:
retryable_exceptions.extend(retry_exception)
if len(checkers) == 1:
# Don't need to use a MultiChecker
return MaxAttemptsDecorator(checkers[0], max_attempts=max_attempts)
else:
multi_checker = MultiChecker(checkers)
return MaxAttemptsDecorator(
multi_checker, max_attempts=max_attempts,
retryable_exceptions=tuple(retryable_exceptions))
def _create_single_checker(config):
if 'response' in config['applies_when']:
return _create_single_response_checker(
config['applies_when']['response'])
elif 'socket_errors' in config['applies_when']:
return ExceptionRaiser()
def _create_single_response_checker(response):
if 'service_error_code' in response:
checker = ServiceErrorCodeChecker(
status_code=response['http_status_code'],
error_code=response['service_error_code'])
elif 'http_status_code' in response:
checker = HTTPStatusCodeChecker(
status_code=response['http_status_code'])
elif 'crc32body' in response:
checker = CRC32Checker(header=response['crc32body'])
else:
# TODO: send a signal.
        raise ValueError("Unknown retry policy: %s" % response)
return checker
def _extract_retryable_exception(config):
applies_when = config['applies_when']
if 'crc32body' in applies_when.get('response', {}):
return [ChecksumError]
elif 'socket_errors' in applies_when:
exceptions = []
for name in applies_when['socket_errors']:
exceptions.extend(EXCEPTION_MAP[name])
return exceptions
class RetryHandler(object):
"""Retry handler.
The retry handler takes two params, ``checker`` object
and an ``action`` object.
    The ``checker`` object must be a callable that, based on a response
    and an attempt number, determines whether or not sufficient criteria
    for a retry have been met. If this is the case, then the ``action`` object
(which also is a callable) determines what needs to happen in the event
of a retry.
"""
def __init__(self, checker, action):
self._checker = checker
self._action = action
def __call__(self, attempts, response, caught_exception, **kwargs):
"""Handler for a retry.
Intended to be hooked up to an event handler (hence the **kwargs),
this will process retries appropriately.
"""
if self._checker(attempts, response, caught_exception):
result = self._action(attempts=attempts)
logger.debug("Retry needed, action of: %s", result)
return result
logger.debug("No retry needed.")
class BaseChecker(object):
"""Base class for retry checkers.
    Each class is responsible for checking a single criterion that determines
    whether or not a retry should happen.
"""
def __call__(self, attempt_number, response, caught_exception):
"""Determine if retry criteria matches.
Note that either ``response`` is not None and ``caught_exception`` is
None or ``response`` is None and ``caught_exception`` is not None.
:type attempt_number: int
:param attempt_number: The total number of times we've attempted
to send the request.
:param response: The HTTP response (if one was received).
:type caught_exception: Exception
:param caught_exception: Any exception that was caught while trying to
send the HTTP response.
        :return: True, if the retry criteria matches (and therefore a retry
            should occur). False if the criteria does not match.
"""
# The default implementation allows subclasses to not have to check
# whether or not response is None or not.
if response is not None:
return self._check_response(attempt_number, response)
elif caught_exception is not None:
return self._check_caught_exception(
attempt_number, caught_exception)
else:
raise ValueError("Both response and caught_exception are None.")
def _check_response(self, attempt_number, response):
pass
def _check_caught_exception(self, attempt_number, caught_exception):
pass
class MaxAttemptsDecorator(BaseChecker):
"""Allow retries up to a maximum number of attempts.
This will pass through calls to the decorated retry checker, provided
that the number of attempts does not exceed max_attempts. It will
also catch any retryable_exceptions passed in. Once max_attempts has
    been exceeded, False will be returned, or any retryable exception
    that was previously being caught will be raised.
"""
def __init__(self, checker, max_attempts, retryable_exceptions=None):
self._checker = checker
self._max_attempts = max_attempts
self._retryable_exceptions = retryable_exceptions
def __call__(self, attempt_number, response, caught_exception):
should_retry = self._should_retry(attempt_number, response,
caught_exception)
if should_retry:
if attempt_number >= self._max_attempts:
logger.debug("Reached the maximum number of retry "
"attempts: %s", attempt_number)
return False
else:
return should_retry
else:
return False
def _should_retry(self, attempt_number, response, caught_exception):
if self._retryable_exceptions and \
attempt_number < self._max_attempts:
try:
return self._checker(attempt_number, response, caught_exception)
except self._retryable_exceptions as e:
logger.debug("retry needed, retryable exception caught: %s",
e, exc_info=True)
return True
else:
# If we've exceeded the max attempts we just let the exception
            # propagate if one has occurred.
return self._checker(attempt_number, response, caught_exception)
class HTTPStatusCodeChecker(BaseChecker):
def __init__(self, status_code):
self._status_code = status_code
def _check_response(self, attempt_number, response):
if response[0].status_code == self._status_code:
logger.debug(
"retry needed: retryable HTTP status code received: %s",
self._status_code)
return True
else:
return False
class ServiceErrorCodeChecker(BaseChecker):
def __init__(self, status_code, error_code):
self._status_code = status_code
self._error_code = error_code
def _check_response(self, attempt_number, response):
if response[0].status_code == self._status_code:
actual_error_code = response[1].get('Error', {}).get('Code')
if actual_error_code == self._error_code:
logger.debug(
"retry needed: matching HTTP status and error code seen: "
"%s, %s", self._status_code, self._error_code)
return True
return False
class MultiChecker(BaseChecker):
def __init__(self, checkers):
self._checkers = checkers
def __call__(self, attempt_number, response, caught_exception):
for checker in self._checkers:
checker_response = checker(attempt_number, response,
caught_exception)
if checker_response:
return checker_response
return False
class CRC32Checker(BaseChecker):
def __init__(self, header):
# The header where the expected crc32 is located.
self._header_name = header
def _check_response(self, attempt_number, response):
http_response = response[0]
expected_crc = http_response.headers.get(self._header_name)
if expected_crc is None:
logger.debug("crc32 check skipped, the %s header is not "
"in the http response.", self._header_name)
else:
actual_crc32 = crc32(response[0].content) & 0xffffffff
if not actual_crc32 == int(expected_crc):
logger.debug(
"retry needed: crc32 check failed, expected != actual: "
"%s != %s", int(expected_crc), actual_crc32)
raise ChecksumError(checksum_type='crc32',
expected_checksum=int(expected_crc),
actual_checksum=actual_crc32)
class ExceptionRaiser(BaseChecker):
"""Raise any caught exceptions.
This class will raise any non None ``caught_exception``.
"""
def _check_caught_exception(self, attempt_number, caught_exception):
# This is implementation specific, but this class is useful by
# coordinating with the MaxAttemptsDecorator.
# The MaxAttemptsDecorator has a list of exceptions it should catch
# and retry, but something needs to come along and actually raise the
# caught_exception. That's what this class is being used for. If
# the MaxAttemptsDecorator is not interested in retrying the exception
        # then this exception just propagates out past the retry code.
raise caught_exception
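# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of botocore): wiring the pieces above
# together by hand. The config layout mirrors what
# create_checker_from_retry_config() expects, and the fake response object
# only needs a ``status_code`` attribute for HTTPStatusCodeChecker.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class _FakeHTTPResponse(object):
        def __init__(self, status_code):
            self.status_code = status_code

    retry_config = {
        '__default__': {
            'max_attempts': 5,
            'delay': {'type': 'exponential', 'base': 1, 'growth_factor': 2},
            'policies': {
                'general_server_error': {
                    'applies_when': {'response': {'http_status_code': 500}},
                },
            },
        },
    }
    handler = create_retry_handler(retry_config)
    # A 500 response on attempt 1 is retried after base * growth_factor**0 = 1s.
    print(handler(attempts=1, response=(_FakeHTTPResponse(500), {}),
                  caught_exception=None))
    # A 200 response needs no retry, so the handler returns None.
    print(handler(attempts=1, response=(_FakeHTTPResponse(200), {}),
                  caught_exception=None))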
| arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/botocore/retryhandler.py | Python | mit | 13,631 | 0.000147 |
from __future__ import print_function
import zlib
import numpy as np
from . import ncStream_pb2 as stream # noqa
MAGIC_HEADER = b'\xad\xec\xce\xda'
MAGIC_DATA = b'\xab\xec\xce\xba'
MAGIC_VDATA = b'\xab\xef\xfe\xba'
MAGIC_VEND = b'\xed\xef\xfe\xda'
MAGIC_ERR = b'\xab\xad\xba\xda'
def read_ncstream_messages(fobj):
messages = []
while True:
magic = read_magic(fobj)
if not magic:
break
if magic == MAGIC_HEADER:
messages.append(stream.Header())
messages[0].ParseFromString(read_block(fobj))
elif magic == MAGIC_DATA:
data = stream.Data()
data.ParseFromString(read_block(fobj))
if data.dataType in (stream.STRING, stream.OPAQUE) or data.vdata:
dt = _dtypeLookup.get(data.dataType, np.object_)
num_obj = read_var_int(fobj)
blocks = np.array([read_block(fobj) for _ in range(num_obj)], dtype=dt)
messages.append(blocks)
elif data.dataType in _dtypeLookup:
data_block = read_numpy_block(fobj, data)
messages.append(data_block)
elif data.dataType in (stream.STRUCTURE, stream.SEQUENCE):
blocks = []
magic = read_magic(fobj)
while magic != MAGIC_VEND:
assert magic == MAGIC_VDATA, 'Bad magic for struct/seq data!'
blocks.append(stream.StructureData())
blocks[0].ParseFromString(read_block(fobj))
magic = read_magic(fobj)
messages.append((data, blocks))
else:
raise NotImplementedError("Don't know how to handle data type: %d" %
data.dataType)
elif magic == MAGIC_ERR:
err = stream.Error()
err.ParseFromString(read_block(fobj))
raise RuntimeError(err.message)
else:
print('Unknown magic: ' + str(' '.join('%02x' % b for b in magic)))
return messages
def read_magic(fobj):
return fobj.read(4)
def read_block(fobj):
num = read_var_int(fobj)
return fobj.read(num)
def read_numpy_block(fobj, data_header):
dt = data_type_to_numpy(data_header.dataType)
    dt = dt.newbyteorder('>' if data_header.bigend else '<')  # newbyteorder() returns a new dtype
shape = tuple(r.size for r in data_header.section.range)
buf = read_block(fobj)
if data_header.compress == stream.DEFLATE:
buf = zlib.decompress(buf)
assert len(buf) == data_header.uncompressedSize
elif data_header.compress != stream.NONE:
raise NotImplementedError('Compression type %d not implemented!' %
data_header.compress)
return np.frombuffer(bytearray(buf), dtype=dt).reshape(*shape)
# STRUCTURE = 8;
# SEQUENCE = 9;
_dtypeLookup = {stream.CHAR: 'b', stream.BYTE: 'b', stream.SHORT: 'i2',
stream.INT: 'i4', stream.LONG: 'i8', stream.FLOAT: 'f4',
stream.DOUBLE: 'f8', stream.STRING: np.string_,
stream.ENUM1: 'B', stream.ENUM2: 'u2', stream.ENUM4: 'u4',
stream.OPAQUE: 'O'}
def data_type_to_numpy(datatype, unsigned=False):
basic_type = _dtypeLookup[datatype]
if datatype in (stream.STRING, stream.OPAQUE):
return np.dtype(basic_type)
if unsigned:
basic_type = basic_type.replace('i', 'u')
return np.dtype('>' + basic_type)
def unpack_variable(var):
dt = data_type_to_numpy(var.dataType, var.unsigned)
if var.dataType == stream.OPAQUE:
type_name = 'opaque'
elif var.dataType == stream.STRING:
type_name = 'string'
else:
type_name = dt.type.__name__
if var.data:
        if var.dataType == stream.STRING:
data = var.data
else:
data = np.fromstring(var.data, dtype=dt)
else:
data = None
return data, dt, type_name
_attrConverters = {stream.Attribute.BYTE: np.dtype('>b'),
stream.Attribute.SHORT: np.dtype('>i2'),
stream.Attribute.INT: np.dtype('>i4'),
stream.Attribute.LONG: np.dtype('>i8'),
stream.Attribute.FLOAT: np.dtype('>f4'),
stream.Attribute.DOUBLE: np.dtype('>f8')}
def unpack_attribute(att):
if att.unsigned:
print('Warning: Unsigned attribute!')
if att.len == 0:
val = None
elif att.type == stream.Attribute.STRING:
val = att.sdata
else:
val = np.fromstring(att.data,
dtype=_attrConverters[att.type], count=att.len)
if att.len == 1:
val = val[0]
return att.name, val
def read_var_int(file_obj):
'Read a variable-length integer'
# Read all bytes from here, stopping with the first one that does not have
# the MSB set. Save the lower 7 bits, and keep stacking to the *left*.
val = 0
shift = 0
while True:
# Read next byte
next_val = ord(file_obj.read(1))
val = ((next_val & 0x7F) << shift) | val
shift += 7
if not next_val & 0x80:
break
return val
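# ---------------------------------------------------------------------------
# Illustrative example (not part of the original module): decoding a
# protobuf-style varint with read_var_int(). The value 300 is encoded as the
# two bytes 0xAC 0x02 (low 7 bits first, MSB set means "more bytes follow").
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from io import BytesIO
    assert read_var_int(BytesIO(b'\xac\x02')) == 300  # (0x02 << 7) | 0x2c
    assert read_var_int(BytesIO(b'\x05')) == 5        # single-byte value
    print('varint examples decoded correctly')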
| hyoklee/siphon | siphon/cdmr/ncstream.py | Python | mit | 5,147 | 0.000971 |
#!/usr/bin/env python
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -t {SAMPLE_TEST}
"""
from __future__ import division, print_function
PROJECT_MODULE = "msmbuilder"
PROJECT_ROOT_FILES = ['msmbuilder', 'LICENSE', 'setup.py']
SAMPLE_TEST = "msmbuilder.tests.test_msm:test_ergodic_cutoff"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
import sys
import os
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
import shutil
import subprocess
import time
from argparse import ArgumentParser, REMAINDER
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="do not build the project "
"(use system installed version)")
parser.add_argument("--build-only", "-b", action="store_true",
default=False,
help="just build, do not run any tests")
parser.add_argument("--tests", "-t", action='append',
help="Specify tests to run")
parser.add_argument("--debug", "-g", action="store_true",
help="Debug build")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
parser.add_argument("--no-verbose", action='store_true', default=False,
help="Default nose verbosity is -v. "
"This turns that off")
parser.add_argument("--ipython", action='store_true', default=False,
help="Launch an ipython shell instead of nose")
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose")
args = parser.parse_args(argv)
if not args.no_build:
site_dir, dst_dir = build_project(args)
sys.path.insert(0, site_dir)
os.environ['PYTHONPATH'] = site_dir
os.environ['PATH'] = dst_dir + "/bin:" + os.environ['PATH']
if args.build_only:
sys.exit(0)
if args.ipython:
commands = ['ipython']
else:
commands = ['nosetests', '--with-timer', '--timer-top-n', '5']
if args.verbose > 0 and not args.no_verbose:
verbosity = "-{vs}".format(vs="v" * args.verbose)
commands += [verbosity]
if args.tests:
commands += args.tests[:]
else:
commands += ["{}.tests".format(PROJECT_MODULE)]
extra_argv = args.args[:]
if extra_argv and extra_argv[0] == '--':
extra_argv = extra_argv[1:]
commands += extra_argv
# Run the tests under build/test
test_dir = os.path.join("build", "test")
try:
shutil.rmtree(test_dir)
except OSError:
pass
try:
os.makedirs(test_dir)
except OSError:
pass
cwd = os.getcwd()
try:
os.chdir(test_dir)
result = subprocess.call(commands)
finally:
os.chdir(cwd)
sys.exit(result)
def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
site-packages directory where it was installed
"""
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run runtests.py in "
"git checkout or unpacked source")
sys.exit(1)
dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
env = dict(os.environ)
cmd = [sys.executable, 'setup.py']
# Always use ccache, if installed
prev_path = env.get('PATH', '').split(os.pathsep)
env['PATH'] = os.pathsep.join(EXTRA_PATH + prev_path)
if args.debug:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
cmd += ["build"]
# Install; avoid producing eggs so numpy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
'--record=' + dst_dir + 'tmp_install_log.txt']
log_filename = os.path.join(ROOT_DIR, 'build.log')
if args.show_build_log:
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
else:
log_filename = os.path.join(ROOT_DIR, 'build.log')
print("Building, see build.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
# allow continuous integration environments kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
print(" ... build in progress")
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
if ret == 0:
print("Build OK")
else:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Build failed!")
sys.exit(1)
from distutils.sysconfig import get_python_lib
return get_python_lib(prefix=dst_dir, plat_specific=True), dst_dir
if __name__ == "__main__":
main(argv=sys.argv[1:])
| Eigenstate/msmbuilder | runtests.py | Python | lgpl-2.1 | 6,293 | 0.000953 |
import fauxfactory
import pytest
from widgetastic.utils import partial_match
from wrapanapi.exceptions import ImageNotFoundError
from cfme import test_requirements
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.exceptions import ItemNotFound
from cfme.markers.env_markers.provider import ONE_PER_TYPE
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.provider([EC2Provider], scope='function'),
pytest.mark.usefixtures('setup_provider', 'refresh_provider'),
test_requirements.tag
]
@pytest.fixture(scope='function')
def map_tags(appliance, provider, request):
tag = appliance.collections.map_tags.create(entity_type=partial_match(provider.name.title()),
label='test',
category='Testing')
yield tag
request.addfinalizer(lambda: tag.delete())
@pytest.fixture(scope='function')
def tagged_vm(provider):
# cu-24x7 vm is tagged with test:testing in provider
tag_vm = provider.data.cap_and_util.capandu_vm
collection = provider.appliance.provider_based_collection(provider)
try:
return collection.instantiate(tag_vm, provider)
except IndexError:
raise ItemNotFound('VM for tag mapping not found!')
@pytest.fixture(scope='function')
def refresh_provider(provider):
provider.refresh_provider_relationships(wait=600)
return True
@pytest.fixture(params=['instances', 'images'])
def tag_mapping_items(request, appliance, provider):
entity_type = request.param
collection = getattr(appliance.collections, 'cloud_{}'.format(entity_type))
collection.filters = {'provider': provider}
view = navigate_to(collection, 'AllForProvider')
name = view.entities.get_first_entity().name
try:
mgmt_item = (
provider.mgmt.get_template(name)
if entity_type == 'images'
else provider.mgmt.get_vm(name)
)
except ImageNotFoundError:
msg = 'Failed looking up template [{}] from CFME on provider: {}'.format(name, provider)
logger.exception(msg)
pytest.skip(msg)
return collection.instantiate(name=name, provider=provider), mgmt_item, entity_type
def tag_components():
# Return tuple with random tag_label and tag_value
return (
fauxfactory.gen_alphanumeric(15, start="tag_label_"),
fauxfactory.gen_alphanumeric(15, start="tag_value_")
)
@pytest.mark.provider([AzureProvider], selector=ONE_PER_TYPE, scope='function')
def test_tag_mapping_azure_instances(tagged_vm, map_tags):
""""
Polarion:
assignee: anikifor
casecomponent: Cloud
caseimportance: high
initialEstimate: 1/12h
testSteps:
1. Find Instance that tagged with test:testing in Azure (cu-24x7)
2. Create tag mapping for Azure instances
3. Refresh Provider
4. Go to Summary of the Instance and read Smart Management field
expectedResults:
1.
2.
3.
4. Field value is "My Company Tags Testing: testing"
"""
tagged_vm.provider.refresh_provider_relationships()
view = navigate_to(tagged_vm, 'Details')
def my_company_tags():
return view.tag.get_text_of('My Company Tags') != 'No My Company Tags have been assigned'
# sometimes it's not updated immediately after provider refresh
wait_for(
my_company_tags,
timeout=600,
delay=45,
fail_func=view.toolbar.reload.click
)
assert view.tag.get_text_of('My Company Tags')[0] == 'Testing: testing'
# TODO: Azure needs tagging support in wrapanapi
def test_labels_update(provider, tag_mapping_items, soft_assert):
"""" Test updates of tag labels on entity details
Polarion:
assignee: anikifor
casecomponent: Cloud
caseimportance: high
initialEstimate: 1/12h
testSteps:
1. Set a tag through provider mgmt interface
2. Refresh Provider
3. Go to entity details and get labels
4. unset tag through provider mgmt interface
5. Go to entity details and get labels
expectedResults:
1.
2.
3. labels includes label + tag
4.
5. labels should not include tag label
"""
entity, mgmt_entity, entity_type = tag_mapping_items
tag_label, tag_value = tag_components()
mgmt_entity.set_tag(tag_label, tag_value)
provider.refresh_provider_relationships(method='ui')
view = navigate_to(entity, 'Details')
# get_tags() doesn't work here as we're looking at labels, not smart management
current_tag_value = view.entities.summary('Labels').get_text_of(tag_label)
soft_assert(
current_tag_value == tag_value, (
'Tag values is not that expected, actual - {}, expected - {}'.format(
current_tag_value, tag_value
)
)
)
mgmt_entity.unset_tag(tag_label, tag_value)
provider.refresh_provider_relationships(method='ui')
view = navigate_to(entity, 'Details', force=True)
fields = view.entities.summary('Labels').fields
soft_assert(
tag_label not in fields,
'{} label was not removed from details page'.format(tag_label)
)
# TODO: Azure needs tagging support in wrapanapi
def test_mapping_tags(
appliance, provider, tag_mapping_items, soft_assert, category, request
):
"""Test mapping tags on provider instances and images
Polarion:
assignee: anikifor
casecomponent: Cloud
caseimportance: high
initialEstimate: 1/12h
testSteps:
1. Set a tag through provider mgmt interface
2. create a CFME tag map for entity type
3. Go to entity details and get smart management table
4. Delete the tag map
5. Go to entity details and get smart management table
expectedResults:
1.
2.
3. smart management should include category name and tag
4.
5. smart management table should NOT include category name and tag
"""
entity, mgmt_entity, entity_type = tag_mapping_items
tag_label, tag_value = tag_components()
mgmt_entity.set_tag(tag_label, tag_value)
request.addfinalizer(
lambda: mgmt_entity.unset_tag(tag_label, tag_value)
)
provider_type = provider.discover_name.split(' ')[0]
# Check the add form to find the correct resource entity type selection string
view = navigate_to(appliance.collections.map_tags, 'Add')
select_text = None # init this since we set it within if, and reference it in for/else:
options = [] # track the option strings for logging in failure
for option in view.resource_entity.all_options:
option_text = option.text # read it once since its used multiple times
options.append(option_text)
if provider_type in option_text and entity_type.capitalize()[:-1] in option_text:
select_text = option_text
break
else:
# no match / break for select_text
if select_text is None:
pytest.fail(
'Failed to match the entity type [{e}] and provider type [{p}] in options: [{o}]'
.format(e=entity_type, p=provider_type, o=options)
)
view.cancel_button.click() # close the open form
map_tag = appliance.collections.map_tags.create(
entity_type=select_text,
label=tag_label,
category=category.name
)
# check the tag shows up
provider.refresh_provider_relationships(method='ui')
soft_assert(any(
tag.category.display_name == category.name and tag.display_name == tag_value
for tag in entity.get_tags()
), '{}: {} was not found in tags'.format(category.name, tag_value))
# delete it
map_tag.delete()
# check the tag goes away
provider.refresh_provider_relationships(method='ui')
soft_assert(not '{}: {}'.format(category.name, tag_value) in entity.get_tags())
@pytest.mark.tier(2)
@pytest.mark.parametrize("collection_type", ["vms", "templates"])
@pytest.mark.provider([EC2Provider], scope='function')
def test_ec2_tags(provider, request, collection_type, testing_instance):
"""
Requirement: Have an ec2 provider
Polarion:
assignee: anikifor
casecomponent: Cloud
caseimportance: medium
initialEstimate: 1/6h
startsin: 5.8
testSteps:
1. Create an instance/choose image
2. tag it with test:testing on EC side
3. Refresh provider
4. Go to summary of this instance/image and check whether there is
test:testing in Labels field
5. Delete that instance/untag image
"""
tag_key = f"test_{fauxfactory.gen_alpha()}"
tag_value = f"testing_{fauxfactory.gen_alpha()}"
if collection_type == "templates":
taggable = provider.mgmt.list_templates()[0]
request.addfinalizer(lambda: taggable.unset_tag(tag_key, tag_value))
else:
taggable = testing_instance.mgmt
taggable.set_tag(tag_key, tag_value)
provider.refresh_provider_relationships(wait=600)
collection = provider.appliance.provider_based_collection(provider, coll_type=collection_type)
taggable_in_cfme = collection.instantiate(taggable.name, provider)
view = navigate_to(taggable_in_cfme, 'Details')
assert view.entities.summary("Labels").get_text_of(tag_key) == tag_value
| izapolsk/integration_tests | cfme/tests/cloud/test_tag_mapping.py | Python | gpl-2.0 | 9,719 | 0.001338 |
"""Helpers for testing Met Office DataPoint."""
from homeassistant.components.metoffice.const import DOMAIN
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
TEST_DATETIME_STRING = "2020-04-25T12:00:00+00:00"
TEST_API_KEY = "test-metoffice-api-key"
TEST_LATITUDE_WAVERTREE = 53.38374
TEST_LONGITUDE_WAVERTREE = -2.90929
TEST_SITE_NAME_WAVERTREE = "Wavertree"
TEST_LATITUDE_KINGSLYNN = 52.75556
TEST_LONGITUDE_KINGSLYNN = 0.44231
TEST_SITE_NAME_KINGSLYNN = "King's Lynn"
METOFFICE_CONFIG_WAVERTREE = {
CONF_API_KEY: TEST_API_KEY,
CONF_LATITUDE: TEST_LATITUDE_WAVERTREE,
CONF_LONGITUDE: TEST_LONGITUDE_WAVERTREE,
CONF_NAME: TEST_SITE_NAME_WAVERTREE,
}
METOFFICE_CONFIG_KINGSLYNN = {
CONF_API_KEY: TEST_API_KEY,
CONF_LATITUDE: TEST_LATITUDE_KINGSLYNN,
CONF_LONGITUDE: TEST_LONGITUDE_KINGSLYNN,
CONF_NAME: TEST_SITE_NAME_KINGSLYNN,
}
KINGSLYNN_SENSOR_RESULTS = {
"weather": ("weather", "sunny"),
"visibility": ("visibility", "Very Good"),
"visibility_distance": ("visibility_distance", "20-40"),
"temperature": ("temperature", "14"),
"feels_like_temperature": ("feels_like_temperature", "13"),
"uv": ("uv_index", "6"),
"precipitation": ("probability_of_precipitation", "0"),
"wind_direction": ("wind_direction", "E"),
"wind_gust": ("wind_gust", "7"),
"wind_speed": ("wind_speed", "2"),
"humidity": ("humidity", "60"),
}
WAVERTREE_SENSOR_RESULTS = {
"weather": ("weather", "sunny"),
"visibility": ("visibility", "Good"),
"visibility_distance": ("visibility_distance", "10-20"),
"temperature": ("temperature", "17"),
"feels_like_temperature": ("feels_like_temperature", "14"),
"uv": ("uv_index", "5"),
"precipitation": ("probability_of_precipitation", "0"),
"wind_direction": ("wind_direction", "SSE"),
"wind_gust": ("wind_gust", "16"),
"wind_speed": ("wind_speed", "9"),
"humidity": ("humidity", "50"),
}
DEVICE_KEY_KINGSLYNN = {
(DOMAIN, f"{TEST_LATITUDE_KINGSLYNN}_{TEST_LONGITUDE_KINGSLYNN}")
}
DEVICE_KEY_WAVERTREE = {
(DOMAIN, f"{TEST_LATITUDE_WAVERTREE}_{TEST_LONGITUDE_WAVERTREE}")
}
| jawilson/home-assistant | tests/components/metoffice/const.py | Python | apache-2.0 | 2,161 | 0.000463 |
from sys import stdin
def readLine():
return stdin.readline().strip()
def readInt():
return int(readLine())
def readInts():
return list(map(int, readLine().split()))
def main():
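    # For each test case, read 10 (url, relevance) pairs and print every URL
    # whose relevance equals the maximum (UVa 12015 - Google is Feeling Lucky).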
T = readInt()
for i in range(T):
pages = [{'url': None, 'v': 0} for j in range(10)]
for j in range(10):
pages[j]['url'], pages[j]['v'] = readLine().split()
pages[j]['v'] = int(pages[j]['v'])
maxVal = max(pages, key=lambda x: x['v'])['v']
pages = list(filter(lambda x: x['v'] == maxVal, pages))
print('Case #%d:' %(i + 1))
for p in pages:
print(p['url'])
if __name__ == '__main__':
main()
| mikebsg01/Contests-Online | UVa/12015-GoogleisFeelingLucky.py | Python | mit | 604 | 0.038079 |
import sys
import numpy as np
from copy import copy, deepcopy
import multiprocessing as mp
from numpy.random import shuffle, random, normal
from math import log, sqrt, exp, pi
import itertools as it
from scipy.stats import gaussian_kde, pearsonr
from scipy.stats import ttest_1samp
from itertools import product
try:
from Crypto.pct_warnings import PowmInsecureWarning
import warnings
warnings.simplefilter("ignore", PowmInsecureWarning)
except:
pass
# In this work, I am computing transfer entropies
# by, first, discretizing expression values into a given
# number of bins. Using those bins, the probability of a given
# interval is computed, and the joint probability over time
# can also be computed (given two time series).
# Want sum of P(X_t+1, X_k2, Y_k1) * log( P(X_t+1, X_k2, Y_k1)*P(X_k2) / (P(X_t+1, X_k2)*P(X_k2, Y_k1)) )
# just get the joint, then get the others by marginalization
# parameters:
# yk: the markov order for Y = let it be 1
# xk: the markov order for x = let it be 1
# yl: the time delay for y
# xl: the time delay for x
# b : the number of bins
# autoTE is
# FOR TE (Y -> X)
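# Illustrative sketch only (not used by the functions below): a binned
# transfer-entropy estimate of the kind described above, assuming markov
# orders xk = yk = 1 and b equal-width bins.  The helper name _te_sketch is
# hypothetical and does not appear elsewhere in this project.
def _te_sketch(x, y, b=8):
    bx = np.digitize(x, np.linspace(min(x), max(x), b))
    by = np.digitize(y, np.linspace(min(y), max(y), b))
    joint = {}
    for t in range(1, len(x)):
        key = (bx[t], bx[t-1], by[t-1])
        joint[key] = joint.get(key, 0) + 1
    n = float(len(x) - 1)
    te = 0.0
    for (x1, x0, y0), c in joint.items():
        p_joint = c / n
        p_x0 = sum(v for (a, b0, c0), v in joint.items() if b0 == x0) / n
        p_x1x0 = sum(v for (a, b0, c0), v in joint.items() if a == x1 and b0 == x0) / n
        p_x0y0 = sum(v for (a, b0, c0), v in joint.items() if b0 == x0 and c0 == y0) / n
        te += p_joint * log((p_joint * p_x0) / (p_x1x0 * p_x0y0))
    return te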
def autoshuff((x,y)):
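    """Return the Pearson correlation of x against a shuffled copy of y (one null sample)."""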
permutedY = deepcopy(y)
shuffle(permutedY)
return(pearsonr(x, permutedY)[0])
def autoCorr(x,y,reps1, cpus):
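    """Return a list: the observed Pearson correlation of x and y, followed by
    reps1 correlations against permuted copies of y (computed with a pool of cpus workers)."""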
pool = mp.Pool(cpus)
observed = pearsonr(x,y)[0]
permutedList = it.repeat( (x,y), reps1)
permutedCor = pool.map(autoshuff, permutedList)
pool.close()
return([observed] + permutedCor)
def geneindex(gene, genes):
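    """Return the index of the first entry in genes that contains gene as a substring, or -1."""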
for i in range(0,len(genes)):
if gene in genes[i]:
return(i)
return(-1)
def prepGeneDataGG(dats, genes, g1, g2):
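    """Look up expression rows for g1 (source) and g2 (target), mean-centre each and
    scale by the larger of 1 and its maximum deviation; return ([], []) if either gene is missing."""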
i = geneindex(g1, genes) # from
j = geneindex(g2, genes) # to
if (i > -1 and j > -1):
x = map(float,dats[i]) #from
y = map(float,dats[j]) # to
x = np.array(x); x = (x-x.mean())/max(1,(x-x.mean()).max())
y = np.array(y); y = (y-y.mean())/max(1,(y-y.mean()).max())
return((x,y))
else:
return( ([],[]) )
def corEdges(exprfile, genefile, fileout, reps, cpus, g1, g2):
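    """Write one line to fileout: g1, g2, the observed correlation and reps permutation correlations."""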
genes = open(genefile,'r').read().strip().split("\n")
dat = open(exprfile,'r').read().strip().split("\n")
dats = map(lambda x: x.split("\t"), dat)
fout = open(fileout,'w')
(fromx,toy) = prepGeneDataGG(dats, genes, g1, g2)
res0 = autoCorr(fromx,toy,reps, cpus)
fout.write(g1 +"\t"+ g2 +"\t"+ "\t".join(map(str,res0)) +"\n")
fout.close()
def maxLagCorEdges(exprfile, genefile, fileout, reps, cpus, ylmax, g1, g2):
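    """Try lags 0..ylmax and write the lag with the strongest observed correlation for (g1, g2)."""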
genes = open(genefile,'r').read().strip().split("\n")
dat = open(exprfile,'r').read().strip().split("\n")
dats = map(lambda x: x.split("\t"), dat)
fout = open(fileout,'w')
(fromx,toy) = prepGeneDataGG(dats, genes, g1, g2)
    maxCorr = 0.0
    maxLag = 0
    maxRes = []
    for yl in range(0,(ylmax+1)):
        try:
            res0 = autoCorr(fromx,toy,reps, cpus)
            if (res0[0] > maxCorr):
                maxCorr = res0[0]
                maxRes = res0
                maxLag = yl
        except:
            e = sys.exc_info()
            sys.stderr.write(str(e)+"\n")
    fout.write(g1 +"\t"+ g2 +"\t"+ str(maxLag) +"\t"+ str(maxCorr) +"\t"+ "\t".join(map(str,maxRes)) +"\n")
    fout.close()
def main(argv):
#for i in range(1,len(argv)):
# print(str(i) +" "+ argv[i])
exprfile = argv[1]
genefile = argv[2]
fileout = argv[3]
reps = int(argv[4])
cpus = int(argv[5])
g1 = argv[6]
g2 = argv[7]
maxLagCorEdges(exprfile, genefile, fileout, reps, cpus, 6, g1, g2)
if __name__ == "__main__":
main(sys.argv)
#pref="/Users/davidlgibbs/Dropbox/Research/Projects/Influence_Maximization_Problem/EserData/"
#pref = "/users/dgibbs/EserData/"
#genes = pref +"yeast_array_genesymbols.csv"
#gexpr = pref +"Eser_Averaged_Expression.txt"
#tout = "/Users/davidlgibbs/Desktop/x.txt"
#corEdges(gexpr, genes, tout, 20, 2, "YOX1", "MBP1")
| Gibbsdavidl/miergolf | src/corEdges.py | Python | bsd-3-clause | 3,914 | 0.020184 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ImagrUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('our_date_joined_field', models.DateField(auto_now_add=True)),
('our_is_active_field', models.BooleanField(default=False)),
('following', models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=20)),
('description', models.CharField(max_length=140)),
('date_uploaded', models.DateField(auto_now_add=True)),
('date_modified', models.DateField(auto_now=True)),
('date_published', models.DateField()),
('published', models.CharField(default=b'private', max_length=7, choices=[(b'private', b'Private Photo'), (b'shared', b'Shared Photo'), (b'public', b'Public Photo')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=20)),
('description', models.CharField(max_length=140)),
('date_uploaded', models.DateField(auto_now_add=True)),
('date_modified', models.DateField(auto_now=True)),
('date_published', models.DateField()),
('published', models.CharField(default=b'private', max_length=7, choices=[(b'private', b'Private Photo'), (b'shared', b'Shared Photo'), (b'public', b'Public Photo')])),
('image_url', models.CharField(default=b'Photo Not Found', max_length=1024)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='album',
name='cover',
field=models.ForeignKey(related_name='Album_cover', to='imagr_app.Photo'),
preserve_default=True,
),
migrations.AddField(
model_name='album',
name='photos',
field=models.ManyToManyField(related_name='Album_photos', to='imagr_app.Photo'),
preserve_default=True,
),
migrations.AddField(
model_name='album',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
| CharlesGust/django-imagr | imagr_site/imagr_app/migrations/0001_initial.py | Python | mit | 5,413 | 0.00388 |
from argh import dispatch_commands
from argh.decorators import named, arg
from geobricks_raster_correlation.core.raster_correlation_core import get_correlation
@named('corr')
@arg('--bins', default=150, help='Bins')
def cli_get_correlation(file1, file2, **kwargs):
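    """Compute the correlation between two raster files and print the series and stats."""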
corr = get_correlation(file1, file2, kwargs['bins'])
print "Series: ", corr['series']
print "Stats: ", corr['stats']
def main():
dispatch_commands([cli_get_correlation])
if __name__ == '__main__':
main() | geobricks/geobricks_raster_correlation | geobricks_raster_correlation/cli/cli_argh.py | Python | gpl-2.0 | 493 | 0.006085 |
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.groups import dashboard
class Instances(horizon.Panel):
name = _("Groups")
slug = "instances"
dashboard.Groups.register(Instances)
| icloudrnd/automation_tools | openstack_dashboard/dashboards/groups/instances/panel.py | Python | apache-2.0 | 250 | 0.004 |
import striga
class CustomContext(striga.context):
pass
| ateska/striga2-sampleapp | app/appweb/context.py | Python | unlicense | 58 | 0.034483 |
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
from __future__ import absolute_import
import tempfile
import shutil
import os
from h5py import File
from ..common import TestCase
class TestFileID(TestCase):
def test_descriptor_core(self):
with File('TestFileID.test_descriptor_core', driver='core', backing_store=False) as f:
with self.assertRaises(NotImplementedError):
f.id.get_vfd_handle()
def test_descriptor_sec2(self):
dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.test_descriptor_sec2')
fn_h5 = os.path.join(dn_tmp, 'test.h5')
try:
with File(fn_h5, driver='sec2') as f:
descriptor = f.id.get_vfd_handle()
self.assertNotEqual(descriptor, 0)
os.fsync(descriptor)
finally:
shutil.rmtree(dn_tmp)
class TestCacheConfig(TestCase):
def test_simple_gets(self):
dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.TestCacheConfig.test_simple_gets')
fn_h5 = os.path.join(dn_tmp, 'test.h5')
try:
with File(fn_h5) as f:
hit_rate = f._id.get_mdc_hit_rate()
mdc_size = f._id.get_mdc_size()
finally:
shutil.rmtree(dn_tmp)
def test_hitrate_reset(self):
dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.TestCacheConfig.test_hitrate_reset')
fn_h5 = os.path.join(dn_tmp, 'test.h5')
try:
with File(fn_h5) as f:
hit_rate = f._id.get_mdc_hit_rate()
f._id.reset_mdc_hit_rate_stats()
hit_rate = f._id.get_mdc_hit_rate()
assert hit_rate == 0
finally:
shutil.rmtree(dn_tmp)
def test_mdc_config_get(self):
dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.TestCacheConfig.test_mdc_config_get')
fn_h5 = os.path.join(dn_tmp, 'test.h5')
try:
with File(fn_h5) as f:
conf = f._id.get_mdc_config()
f._id.set_mdc_config(conf)
finally:
shutil.rmtree(dn_tmp)
| ryfeus/lambda-packs | HDF4_H5_NETCDF/source2.7/h5py/tests/old/test_h5f.py | Python | mit | 2,360 | 0.002119 |
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from . import TestScaleBase
class TestScaleCompute(TestScaleBase):
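    """Scale workflow tests covering scale-in (and scale back out from zero) of compute and db nodes."""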
def test_compute_scale_in_compute(self):
expectations = self.deploy_app('scale4')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['existing']['install'] = 2
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_in_compute_ignore_failure_true(self):
expectations = self.deploy_app('scale_ignore_failure')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'ignore_failure': True,
'delta': -1})
expectations['compute']['existing']['install'] = 2
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_in_compute_ignore_failure_false(self):
expectations = self.deploy_app('scale_ignore_failure')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
try:
self.scale(parameters={
'scalable_entity_name': 'compute',
'ignore_failure': False,
'delta': -1})
except RuntimeError as e:
self.assertIn(
"RuntimeError: Workflow failed: Task failed "
"'testmockoperations.tasks.mock_stop_failure'",
str(e))
else:
self.fail()
def test_compute_scale_out_and_in_compute_from_0(self):
expectations = self.deploy_app('scale10')
expectations['compute']['new']['install'] = 0
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute'})
expectations['compute']['new']['install'] = 1
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['new']['install'] = 0
expectations['compute']['existing']['install'] = 0
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_in_2_compute(self):
expectations = self.deploy_app('scale4')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -2})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 2
expectations['compute']['removed']['uninstall'] = 2
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_in_compute(self):
expectations = self.deploy_app('scale5')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 4
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 2
expectations['db']['removed']['uninstall'] = 2
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_db(self):
expectations = self.deploy_app('scale6')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 2
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'db',
'delta': -1})
expectations['compute']['existing']['install'] = 2
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 1
expectations['db']['removed']['uninstall'] = 1
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_compute(self):
expectations = self.deploy_app('scale6')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 2
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 8
expectations['db']['existing']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_and_out_compute_from_0(self):
expectations = self.deploy_app('scale11')
expectations['compute']['new']['install'] = 0
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 0
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': 1})
expectations['compute']['new']['install'] = 1
expectations['compute']['existing']['install'] = 0
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 0
expectations['db']['existing']['scale_rel_install'] = 2
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['new']['install'] = 0
expectations['compute']['existing']['install'] = 0
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['scale_rel_install'] = 2
expectations['db']['existing']['rel_uninstall'] = 2
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_in_db_scale_db(self):
expectations = self.deploy_app('scale5')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 4
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'db',
'delta': -1,
'scale_compute': False})
expectations['compute']['existing']['install'] = 2
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 2
expectations['db']['removed']['uninstall'] = 2
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_in_db(self):
expectations = self.deploy_app('scale5')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 4
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'db',
'delta': -1,
'scale_compute': True})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 2
expectations['db']['removed']['uninstall'] = 2
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
| isaac-s/cloudify-manager | tests/integration_tests/tests/agentless_tests/scale/test_scale_in.py | Python | apache-2.0 | 9,915 | 0 |
# Copyright 2009 Daniel Woodhouse
#
#This file is part of mtp-lastfm.
#
#mtp-lastfm is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#mtp-lastfm is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with mtp-lastfm. If not, see http://www.gnu.org/licenses/
import hashlib
import urllib2
import urllib
import webbrowser
import httplib
import xml.etree.ElementTree as ET
from httprequest import HttpRequest
import localisation
_ = localisation.set_get_text()
class LastfmWebService(object):
def __init__(self):
self.api_key = "2d21a4ab6f049a413eb27dbf9af10579"
self.api_2 = "6146d36f59da8720cd5f3dd2c8422da0"
self.url = "http://ws.audioscrobbler.com/2.0/"
def request_session_token(self):
"""returns a token which is used authenticate mtp-lastfm with the users account"""
data = {"api_key" : self.api_key, "method" : "auth.gettoken"}
data['api_sig'] = self.create_api_sig(data)
encoded_data = urllib.urlencode(data)
url = self.url + "?" + encoded_data
conn = urllib2.urlopen(url)
return self.parse_xml(conn, "token")
def parse_xml(self, conn, tag):
"""Searches an XML document for a single tag and returns its value"""
tree = ET.parse(conn)
iter = tree.getiterator()
for child in iter:
if child.tag == tag:
token = child.text
break
try:
return token
except:
return False
def parse_xml_doc(self, doc, tag):
"""Search an XML doc for tags and returns them all as a list"""
tree = ET.parse(doc)
iter = tree.getiterator()
tags = []
for child in iter:
if child.tag == tag:
tags.append(child.text)
return tags
    def create_api_sig(self, params):
        """params is a dictionary of param_name : value sorted into the correct order"""
        data = ""
        items = params.items()
        items.sort()
        for i in items:
            for j in i:
                data += j
        data += self.api_2
        api_sig = hashlib.md5(data.encode('UTF-8')).hexdigest()
        return api_sig
def request_authorisation(self, token):
"""Opens a browser to request users authentication"""
encoded_values = urllib.urlencode({
"api_key" : self.api_key,
"token" : token
})
webbrowser.open("http://www.last.fm/api/auth/?" + encoded_values)
def create_web_service_session(self, token):
"""The final step, this creates a token with infinite lifespan store in db"""
data = {
"api_key" : self.api_key,
"method" : "auth.getsession",
"token" : token }
data['api_sig'] = self.create_api_sig(data)
encode_values = urllib.urlencode(data)
url = self.url + "?" + encode_values
try:
conn = urllib2.urlopen(url)
self.key = self.parse_xml(conn, "key")
return True, self.key
except urllib2.HTTPError:
return False, _("A problem occurred during authentication")
def love_track(self, artist, track, sk):
#Params
#track (Required) : A track name (utf8 encoded)
#artist (Required) : An artist name (utf8 encoded)
#api_key (Required) : A Last.fm API key.
#api_sig (Required) : A Last.fm method signature.
#sk (Required) : A session key generated by authenticating a user.
post_values = {
"track" : track,
"artist" : artist,
"api_key" : self.api_key,
"method" : "track.love",
"sk" : sk}
post_values['api_sig'] = self.create_api_sig(post_values)
post_values = urllib.urlencode(post_values)
req = urllib2.Request(url=self.url, data=post_values)
try:
url_handle = urllib2.urlopen(req)
response = url_handle.readlines()[1]
l = response.find('"') + 1
r = response.rfind('"')
response = response[l:r]
return response
except urllib2.URLError, error:
return error
except httplib.BadStatusLine, error:
return error
def get_user_top_tags(self, username, limit=15):
#method user.getTopTags
#Params
#user (Required) : The user name
#limit (Optional) : Limit the number of tags returned
#api_key (Required) : A Last.fm API key.
encoded_values = urllib.urlencode(
{"method" : "user.gettoptags",
"user" : username,
"limit" : limit,
"api_key" : self.api_key}
)
url = self.url + "?" + encoded_values
conn = HttpRequest(url)
xml_doc = conn.connect(xml=True)
return self.parse_xml_doc(xml_doc, "name")
def get_popular_tags(self, method, info_dict):
"""method is either artist.gettoptags or track.gettoptags"""
#Params
#track (Optional) : The track name in question
#artist (Required) : The artist name in question
#api_key (Required) : A Last.fm API key.
dict = {"method" : method,
"artist" : info_dict['Artist'],
"api_key" : self.api_key}
if method == "track.gettoptags":
dict['track'] = info_dict['Track']
encoded_values = urllib.urlencode(dict)
url = self.url + "?" + encoded_values
conn = HttpRequest(url)
xml_doc = conn.connect(xml=True)
return self.parse_xml_doc(xml_doc, "name")
def send_tags(self, method, info, tags, sk):
"""Sends tags to last.fm. method is one of:
album.addtags, artist.addtags or track.addtags
        info is the artist, track and album info
tags is a comma delimited list of no more than 10 tags"""
#All methods require these parameters:
#tags (Required) : A comma delimited list of user supplied tags to apply
#to this album. Accepts a maximum of 10 tags.
#api_key (Required) : A Last.fm API key.
#api_sig (Required)
#sk (Required)
#artist (Required) : The artist name in question
post_values = {
"method" : method,
"tags" : tags,
"api_key" : self.api_key,
"sk" : sk,
"artist" : info['Artist']}
#these methods require additional info:
#album.addTags -> album
#track.addTags -> track
if method == "album.addtags":
post_values['album'] = info['Album']
if method == "track.addtags":
post_values['track'] = info['Track']
post_values['api_sig'] = self.create_api_sig(post_values)
conn = HttpRequest(self.url, urllib.urlencode(post_values))
response = conn.connect()
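# Example of the intended authorisation flow (illustrative only; this snippet is
# not executed anywhere in mtp-lastfm):
#
#   ws = LastfmWebService()
#   token = ws.request_session_token()
#   ws.request_authorisation(token)              # user approves access in the browser
#   ok, session_key = ws.create_web_service_session(token)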
| woodenbrick/mtp-lastfm | mtplastfm/webservices.py | Python | gpl-3.0 | 7,361 | 0.01277 |
import unittest
import unittest.mock
import random
import time
import pickle
import warnings
from functools import partial
from math import log, exp, pi, fsum, sin
from test import support
class TestBasicOps:
# Superclass with tests common to all generators.
# Subclasses must arrange for self.gen to retrieve the Random instance
# to be tested.
def randomlist(self, n):
"""Helper function to make a list of random numbers"""
return [self.gen.random() for i in range(n)]
def test_autoseed(self):
self.gen.seed()
state1 = self.gen.getstate()
time.sleep(0.1)
        self.gen.seed() # different seeds at different times
state2 = self.gen.getstate()
self.assertNotEqual(state1, state2)
def test_saverestore(self):
N = 1000
self.gen.seed()
state = self.gen.getstate()
randseq = self.randomlist(N)
self.gen.setstate(state) # should regenerate the same sequence
self.assertEqual(randseq, self.randomlist(N))
def test_seedargs(self):
# Seed value with a negative hash.
class MySeed(object):
def __hash__(self):
return -1729
for arg in [None, 0, 0, 1, 1, -1, -1, 10**20, -(10**20),
3.14, 1+2j, 'a', tuple('abc'), MySeed()]:
self.gen.seed(arg)
for arg in [list(range(3)), dict(one=1)]:
self.assertRaises(TypeError, self.gen.seed, arg)
self.assertRaises(TypeError, self.gen.seed, 1, 2, 3, 4)
self.assertRaises(TypeError, type(self.gen), [])
@unittest.mock.patch('random._urandom') # os.urandom
def test_seed_when_randomness_source_not_found(self, urandom_mock):
# Random.seed() uses time.time() when an operating system specific
        # randomness source is not found. To test this on machines where it
# exists, run the above test, test_seedargs(), again after mocking
# os.urandom() so that it raises the exception expected when the
# randomness source is not available.
urandom_mock.side_effect = NotImplementedError
self.test_seedargs()
def test_shuffle(self):
shuffle = self.gen.shuffle
lst = []
shuffle(lst)
self.assertEqual(lst, [])
lst = [37]
shuffle(lst)
self.assertEqual(lst, [37])
seqs = [list(range(n)) for n in range(10)]
shuffled_seqs = [list(range(n)) for n in range(10)]
for shuffled_seq in shuffled_seqs:
shuffle(shuffled_seq)
for (seq, shuffled_seq) in zip(seqs, shuffled_seqs):
self.assertEqual(len(seq), len(shuffled_seq))
self.assertEqual(set(seq), set(shuffled_seq))
# The above tests all would pass if the shuffle was a
# no-op. The following non-deterministic test covers that. It
# asserts that the shuffled sequence of 1000 distinct elements
# must be different from the original one. Although there is
# mathematically a non-zero probability that this could
# actually happen in a genuinely random shuffle, it is
# completely negligible, given that the number of possible
# permutations of 1000 objects is 1000! (factorial of 1000),
# which is considerably larger than the number of atoms in the
# universe...
lst = list(range(1000))
shuffled_lst = list(range(1000))
shuffle(shuffled_lst)
self.assertTrue(lst != shuffled_lst)
shuffle(lst)
self.assertTrue(lst != shuffled_lst)
def test_choice(self):
choice = self.gen.choice
with self.assertRaises(IndexError):
choice([])
self.assertEqual(choice([50]), 50)
self.assertIn(choice([25, 75]), [25, 75])
def test_sample(self):
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
N = 100
population = range(N)
for k in range(N+1):
s = self.gen.sample(population, k)
self.assertEqual(len(s), k)
uniq = set(s)
self.assertEqual(len(uniq), k)
self.assertTrue(uniq <= set(population))
self.assertEqual(self.gen.sample([], 0), []) # test edge case N==k==0
# Exception raised if size of sample exceeds that of population
self.assertRaises(ValueError, self.gen.sample, population, N+1)
def test_sample_distribution(self):
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n = 5
pop = range(n)
trials = 10000 # large num prevents false negatives without slowing normal case
def factorial(n):
if n == 0:
return 1
return n * factorial(n - 1)
for k in range(n):
expected = factorial(n) // factorial(n-k)
perms = {}
for i in range(trials):
perms[tuple(self.gen.sample(pop, k))] = None
if len(perms) == expected:
break
else:
self.fail()
def test_sample_inputs(self):
# SF bug #801342 -- population can be any iterable defining __len__()
self.gen.sample(set(range(20)), 2)
self.gen.sample(range(20), 2)
self.gen.sample(range(20), 2)
self.gen.sample(str('abcdefghijklmnopqrst'), 2)
self.gen.sample(tuple('abcdefghijklmnopqrst'), 2)
def test_sample_on_dicts(self):
self.assertRaises(TypeError, self.gen.sample, dict.fromkeys('abcdef'), 2)
def test_gauss(self):
# Ensure that the seed() method initializes all the hidden state. In
# particular, through 2.2.1 it failed to reset a piece of state used
# by (and only by) the .gauss() method.
for seed in 1, 12, 123, 1234, 12345, 123456, 654321:
self.gen.seed(seed)
x1 = self.gen.random()
y1 = self.gen.gauss(0, 1)
self.gen.seed(seed)
x2 = self.gen.random()
y2 = self.gen.gauss(0, 1)
self.assertEqual(x1, x2)
self.assertEqual(y1, y2)
def test_pickling(self):
state = pickle.dumps(self.gen)
origseq = [self.gen.random() for i in range(10)]
newgen = pickle.loads(state)
restoredseq = [newgen.random() for i in range(10)]
self.assertEqual(origseq, restoredseq)
def test_bug_1727780(self):
# verify that version-2-pickles can be loaded
# fine, whether they are created on 32-bit or 64-bit
# platforms, and that version-3-pickles load fine.
files = [("randv2_32.pck", 780),
("randv2_64.pck", 866),
("randv3.pck", 343)]
for file, value in files:
f = open(support.findfile(file),"rb")
r = pickle.load(f)
f.close()
self.assertEqual(int(r.random()*1000), value)
def test_bug_9025(self):
# Had problem with an uneven distribution in int(n*random())
# Verify the fix by checking that distributions fall within expectations.
n = 100000
randrange = self.gen.randrange
k = sum(randrange(6755399441055744) % 3 == 2 for i in range(n))
self.assertTrue(0.30 < k/n < .37, (k/n))
try:
random.SystemRandom().random()
except NotImplementedError:
SystemRandom_available = False
else:
SystemRandom_available = True
@unittest.skipUnless(SystemRandom_available, "random.SystemRandom not available")
class SystemRandom_TestBasicOps(TestBasicOps, unittest.TestCase):
gen = random.SystemRandom()
def test_autoseed(self):
# Doesn't need to do anything except not fail
self.gen.seed()
def test_saverestore(self):
self.assertRaises(NotImplementedError, self.gen.getstate)
self.assertRaises(NotImplementedError, self.gen.setstate, None)
def test_seedargs(self):
# Doesn't need to do anything except not fail
self.gen.seed(100)
def test_gauss(self):
self.gen.gauss_next = None
self.gen.seed(100)
self.assertEqual(self.gen.gauss_next, None)
def test_pickling(self):
self.assertRaises(NotImplementedError, pickle.dumps, self.gen)
def test_53_bits_per_float(self):
# This should pass whenever a C double has 53 bit precision.
span = 2 ** 53
cum = 0
for i in range(100):
cum |= int(self.gen.random() * span)
self.assertEqual(cum, span-1)
def test_bigrand(self):
# The randrange routine should build-up the required number of bits
# in stages so that all bit positions are active.
span = 2 ** 500
cum = 0
for i in range(100):
r = self.gen.randrange(span)
self.assertTrue(0 <= r < span)
cum |= r
self.assertEqual(cum, span-1)
def test_bigrand_ranges(self):
for i in [40,80, 160, 200, 211, 250, 375, 512, 550]:
start = self.gen.randrange(2 ** (i-2))
stop = self.gen.randrange(2 ** i)
if stop <= start:
continue
self.assertTrue(start <= self.gen.randrange(start, stop) < stop)
def test_rangelimits(self):
for start, stop in [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]:
self.assertEqual(set(range(start,stop)),
set([self.gen.randrange(start,stop) for i in range(100)]))
def test_randrange_nonunit_step(self):
rint = self.gen.randrange(0, 10, 2)
self.assertIn(rint, (0, 2, 4, 6, 8))
rint = self.gen.randrange(0, 2, 2)
self.assertEqual(rint, 0)
def test_randrange_errors(self):
raises = partial(self.assertRaises, ValueError, self.gen.randrange)
# Empty range
raises(3, 3)
raises(-721)
raises(0, 100, -12)
# Non-integer start/stop
raises(3.14159)
raises(0, 2.71828)
# Zero and non-integer step
raises(0, 42, 0)
raises(0, 42, 3.14159)
def test_genrandbits(self):
# Verify ranges
for k in range(1, 1000):
self.assertTrue(0 <= self.gen.getrandbits(k) < 2**k)
# Verify all bits active
getbits = self.gen.getrandbits
for span in [1, 2, 3, 4, 31, 32, 32, 52, 53, 54, 119, 127, 128, 129]:
cum = 0
for i in range(100):
cum |= getbits(span)
self.assertEqual(cum, 2**span-1)
# Verify argument checking
self.assertRaises(TypeError, self.gen.getrandbits)
self.assertRaises(TypeError, self.gen.getrandbits, 1, 2)
self.assertRaises(ValueError, self.gen.getrandbits, 0)
self.assertRaises(ValueError, self.gen.getrandbits, -1)
self.assertRaises(TypeError, self.gen.getrandbits, 10.1)
def test_randbelow_logic(self, _log=log, int=int):
# check bitcount transition points: 2**i and 2**(i+1)-1
# show that: k = int(1.001 + _log(n, 2))
# is equal to or one greater than the number of bits in n
for i in range(1, 1000):
n = 1 << i # check an exact power of two
numbits = i+1
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits)
self.assertEqual(n, 2**(k-1))
n += n - 1 # check 1 below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertIn(k, [numbits, numbits+1])
self.assertTrue(2**k > n > 2**(k-2))
n -= n >> 15 # check a little farther below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits) # note the stronger assertion
self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion
class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase):
gen = random.Random()
def test_guaranteed_stable(self):
# These sequences are guaranteed to stay the same across versions of python
self.gen.seed(3456147, version=1)
self.assertEqual([self.gen.random().hex() for i in range(4)],
['0x1.ac362300d90d2p-1', '0x1.9d16f74365005p-1',
'0x1.1ebb4352e4c4dp-1', '0x1.1a7422abf9c11p-1'])
self.gen.seed("the quick brown fox", version=2)
self.assertEqual([self.gen.random().hex() for i in range(4)],
['0x1.1239ddfb11b7cp-3', '0x1.b3cbb5c51b120p-4',
'0x1.8c4f55116b60fp-1', '0x1.63eb525174a27p-1'])
def test_setstate_first_arg(self):
self.assertRaises(ValueError, self.gen.setstate, (1, None, None))
def test_setstate_middle_arg(self):
# Wrong type, s/b tuple
self.assertRaises(TypeError, self.gen.setstate, (2, None, None))
# Wrong length, s/b 625
self.assertRaises(ValueError, self.gen.setstate, (2, (1,2,3), None))
# Wrong type, s/b tuple of 625 ints
self.assertRaises(TypeError, self.gen.setstate, (2, ('a',)*625, None))
# Last element s/b an int also
self.assertRaises(TypeError, self.gen.setstate, (2, (0,)*624+('a',), None))
# Little trick to make "tuple(x % (2**32) for x in internalstate)"
# raise ValueError. I cannot think of a simple way to achieve this, so
# I am opting for using a generator as the middle argument of setstate
# which attempts to cast a NaN to integer.
state_values = self.gen.getstate()[1]
state_values = list(state_values)
state_values[-1] = float('nan')
state = (int(x) for x in state_values)
self.assertRaises(TypeError, self.gen.setstate, (2, state, None))
def test_referenceImplementation(self):
# Compare the python implementation with results from the original
# code. Create 2000 53-bit precision random floats. Compare only
# the last ten entries to show that the independent implementations
# are tracking. Here is the main() function needed to create the
# list of expected random numbers:
# void main(void){
# int i;
# unsigned long init[4]={61731, 24903, 614, 42143}, length=4;
# init_by_array(init, length);
# for (i=0; i<2000; i++) {
# printf("%.15f ", genrand_res53());
# if (i%5==4) printf("\n");
# }
# }
expected = [0.45839803073713259,
0.86057815201978782,
0.92848331726782152,
0.35932681119782461,
0.081823493762449573,
0.14332226470169329,
0.084297823823520024,
0.53814864671831453,
0.089215024911993401,
0.78486196105372907]
self.gen.seed(61731 + (24903<<32) + (614<<64) + (42143<<96))
actual = self.randomlist(2000)[-10:]
for a, e in zip(actual, expected):
self.assertAlmostEqual(a,e,places=14)
def test_strong_reference_implementation(self):
# Like test_referenceImplementation, but checks for exact bit-level
# equality. This should pass on any box where C double contains
# at least 53 bits of precision (the underlying algorithm suffers
# no rounding errors -- all results are exact).
from math import ldexp
expected = [0x0eab3258d2231f,
0x1b89db315277a5,
0x1db622a5518016,
0x0b7f9af0d575bf,
0x029e4c4db82240,
0x04961892f5d673,
0x02b291598e4589,
0x11388382c15694,
0x02dad977c9e1fe,
0x191d96d4d334c6]
self.gen.seed(61731 + (24903<<32) + (614<<64) + (42143<<96))
actual = self.randomlist(2000)[-10:]
for a, e in zip(actual, expected):
self.assertEqual(int(ldexp(a, 53)), e)
def test_long_seed(self):
# This is most interesting to run in debug mode, just to make sure
# nothing blows up. Under the covers, a dynamically resized array
# is allocated, consuming space proportional to the number of bits
# in the seed. Unfortunately, that's a quadratic-time algorithm,
# so don't make this horribly big.
seed = (1 << (10000 * 8)) - 1 # about 10K bytes
self.gen.seed(seed)
def test_53_bits_per_float(self):
# This should pass whenever a C double has 53 bit precision.
span = 2 ** 53
cum = 0
for i in range(100):
cum |= int(self.gen.random() * span)
self.assertEqual(cum, span-1)
def test_bigrand(self):
# The randrange routine should build-up the required number of bits
# in stages so that all bit positions are active.
span = 2 ** 500
cum = 0
for i in range(100):
r = self.gen.randrange(span)
self.assertTrue(0 <= r < span)
cum |= r
self.assertEqual(cum, span-1)
def test_bigrand_ranges(self):
for i in [40,80, 160, 200, 211, 250, 375, 512, 550]:
start = self.gen.randrange(2 ** (i-2))
stop = self.gen.randrange(2 ** i)
if stop <= start:
continue
self.assertTrue(start <= self.gen.randrange(start, stop) < stop)
def test_rangelimits(self):
for start, stop in [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]:
self.assertEqual(set(range(start,stop)),
set([self.gen.randrange(start,stop) for i in range(100)]))
def test_genrandbits(self):
# Verify cross-platform repeatability
self.gen.seed(1234567)
self.assertEqual(self.gen.getrandbits(100),
97904845777343510404718956115)
# Verify ranges
for k in range(1, 1000):
self.assertTrue(0 <= self.gen.getrandbits(k) < 2**k)
# Verify all bits active
getbits = self.gen.getrandbits
for span in [1, 2, 3, 4, 31, 32, 32, 52, 53, 54, 119, 127, 128, 129]:
cum = 0
for i in range(100):
cum |= getbits(span)
self.assertEqual(cum, 2**span-1)
# Verify argument checking
self.assertRaises(TypeError, self.gen.getrandbits)
self.assertRaises(TypeError, self.gen.getrandbits, 'a')
self.assertRaises(TypeError, self.gen.getrandbits, 1, 2)
self.assertRaises(ValueError, self.gen.getrandbits, 0)
self.assertRaises(ValueError, self.gen.getrandbits, -1)
def test_randbelow_logic(self, _log=log, int=int):
# check bitcount transition points: 2**i and 2**(i+1)-1
# show that: k = int(1.001 + _log(n, 2))
# is equal to or one greater than the number of bits in n
for i in range(1, 1000):
n = 1 << i # check an exact power of two
numbits = i+1
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits)
self.assertEqual(n, 2**(k-1))
n += n - 1 # check 1 below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertIn(k, [numbits, numbits+1])
self.assertTrue(2**k > n > 2**(k-2))
n -= n >> 15 # check a little farther below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits) # note the stronger assertion
self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion
@unittest.mock.patch('random.Random.random')
def test_randbelow_overriden_random(self, random_mock):
# Random._randbelow() can only use random() when the built-in one
# has been overridden but no new getrandbits() method was supplied.
random_mock.side_effect = random.SystemRandom().random
maxsize = 1<<random.BPF
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
# Population range too large (n >= maxsize)
self.gen._randbelow(maxsize+1, maxsize = maxsize)
self.gen._randbelow(5640, maxsize = maxsize)
# This might be going too far to test a single line, but because of our
# noble aim of achieving 100% test coverage we need to write a case in
# which the following line in Random._randbelow() gets executed:
#
# rem = maxsize % n
# limit = (maxsize - rem) / maxsize
# r = random()
# while r >= limit:
# r = random() # <== *This line* <==<
#
# Therefore, to guarantee that the while loop is executed at least
# once, we need to mock random() so that it returns a number greater
# than 'limit' the first time it gets called.
n = 42
epsilon = 0.01
limit = (maxsize - (maxsize % n)) / maxsize
random_mock.side_effect = [limit + epsilon, limit - epsilon]
self.gen._randbelow(n, maxsize = maxsize)
def test_randrange_bug_1590891(self):
start = 1000000000000
stop = -100000000000000000000
step = -200
x = self.gen.randrange(start, stop, step)
self.assertTrue(stop < x <= start)
self.assertEqual((x+stop)%step, 0)
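# Helper: complete gamma function via the Lanczos approximation; used by
# TestDistributions.test_avg_std below to compute expected Weibull moments.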
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
# Reflection to right half of complex plane
if z < 0.5:
return pi / sin(pi*z) / gamma(1.0-z)
# Lanczos approximation with g=7
az = z + (7.0 - 0.5)
return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
0.9999999999995183,
676.5203681218835 / z,
-1259.139216722289 / (z+1.0),
771.3234287757674 / (z+2.0),
-176.6150291498386 / (z+3.0),
12.50734324009056 / (z+4.0),
-0.1385710331296526 / (z+5.0),
0.9934937113930748e-05 / (z+6.0),
0.1659470187408462e-06 / (z+7.0),
])
class TestDistributions(unittest.TestCase):
def test_zeroinputs(self):
# Verify that distributions can handle a series of zero inputs'
g = random.Random()
x = [g.random() for i in range(50)] + [0.0]*5
g.random = x[:].pop; g.uniform(1,10)
g.random = x[:].pop; g.paretovariate(1.0)
g.random = x[:].pop; g.expovariate(1.0)
g.random = x[:].pop; g.weibullvariate(1.0, 1.0)
g.random = x[:].pop; g.vonmisesvariate(1.0, 1.0)
g.random = x[:].pop; g.normalvariate(0.0, 1.0)
g.random = x[:].pop; g.gauss(0.0, 1.0)
g.random = x[:].pop; g.lognormvariate(0.0, 1.0)
g.random = x[:].pop; g.vonmisesvariate(0.0, 1.0)
g.random = x[:].pop; g.gammavariate(0.01, 1.0)
g.random = x[:].pop; g.gammavariate(1.0, 1.0)
g.random = x[:].pop; g.gammavariate(200.0, 1.0)
g.random = x[:].pop; g.betavariate(3.0, 3.0)
g.random = x[:].pop; g.triangular(0.0, 1.0, 1.0/3.0)
def test_avg_std(self):
# Use integration to test distribution average and standard deviation.
# Only works for distributions which do not consume variates in pairs
g = random.Random()
N = 5000
x = [i/float(N) for i in range(1,N)]
for variate, args, mu, sigmasqrd in [
(g.uniform, (1.0,10.0), (10.0+1.0)/2, (10.0-1.0)**2/12),
(g.triangular, (0.0, 1.0, 1.0/3.0), 4.0/9.0, 7.0/9.0/18.0),
(g.expovariate, (1.5,), 1/1.5, 1/1.5**2),
(g.vonmisesvariate, (1.23, 0), pi, pi**2/3),
(g.paretovariate, (5.0,), 5.0/(5.0-1),
5.0/((5.0-1)**2*(5.0-2))),
(g.weibullvariate, (1.0, 3.0), gamma(1+1/3.0),
gamma(1+2/3.0)-gamma(1+1/3.0)**2) ]:
g.random = x[:].pop
y = []
for i in range(len(x)):
try:
y.append(variate(*args))
except IndexError:
pass
s1 = s2 = 0
for e in y:
s1 += e
s2 += (e - mu) ** 2
N = len(y)
self.assertAlmostEqual(s1/N, mu, places=2,
msg='%s%r' % (variate.__name__, args))
self.assertAlmostEqual(s2/(N-1), sigmasqrd, places=2,
msg='%s%r' % (variate.__name__, args))
def test_constant(self):
g = random.Random()
N = 100
for variate, args, expected in [
(g.uniform, (10.0, 10.0), 10.0),
(g.triangular, (10.0, 10.0), 10.0),
(g.triangular, (10.0, 10.0, 10.0), 10.0),
(g.expovariate, (float('inf'),), 0.0),
(g.vonmisesvariate, (3.0, float('inf')), 3.0),
(g.gauss, (10.0, 0.0), 10.0),
(g.lognormvariate, (0.0, 0.0), 1.0),
(g.lognormvariate, (-float('inf'), 0.0), 0.0),
(g.normalvariate, (10.0, 0.0), 10.0),
(g.paretovariate, (float('inf'),), 1.0),
(g.weibullvariate, (10.0, float('inf')), 10.0),
(g.weibullvariate, (0.0, 10.0), 0.0),
]:
for i in range(N):
self.assertEqual(variate(*args), expected)
def test_von_mises_range(self):
# Issue 17149: von mises variates were not consistently in the
# range [0, 2*PI].
g = random.Random()
N = 100
for mu in 0.0, 0.1, 3.1, 6.2:
for kappa in 0.0, 2.3, 500.0:
for _ in range(N):
sample = g.vonmisesvariate(mu, kappa)
self.assertTrue(
0 <= sample <= random.TWOPI,
msg=("vonmisesvariate({}, {}) produced a result {} out"
" of range [0, 2*pi]").format(mu, kappa, sample))
def test_von_mises_large_kappa(self):
        # Issue #17141: vonmisesvariate() used to hang for large kappas
random.vonmisesvariate(0, 1e15)
random.vonmisesvariate(0, 1e100)
def test_gammavariate_errors(self):
# Both alpha and beta must be > 0.0
self.assertRaises(ValueError, random.gammavariate, -1, 3)
self.assertRaises(ValueError, random.gammavariate, 0, 2)
self.assertRaises(ValueError, random.gammavariate, 2, 0)
self.assertRaises(ValueError, random.gammavariate, 1, -3)
@unittest.mock.patch('random.Random.random')
def test_gammavariate_full_code_coverage(self, random_mock):
# There are three different possibilities in the current implementation
# of random.gammavariate(), depending on the value of 'alpha'. What we
# are going to do here is to fix the values returned by random() to
# generate test cases that provide 100% line coverage of the method.
# #1: alpha > 1.0: we want the first random number to be outside the
# [1e-7, .9999999] range, so that the continue statement executes
# once. The values of u1 and u2 will be 0.5 and 0.3, respectively.
random_mock.side_effect = [1e-8, 0.5, 0.3]
returned_value = random.gammavariate(1.1, 2.3)
self.assertAlmostEqual(returned_value, 2.53)
        # #2: alpha == 1: first random number less than 1e-7 so that the body
# of the while loop executes once. Then random.random() returns 0.45,
# which causes while to stop looping and the algorithm to terminate.
random_mock.side_effect = [1e-8, 0.45]
returned_value = random.gammavariate(1.0, 3.14)
self.assertAlmostEqual(returned_value, 2.507314166123803)
# #3: 0 < alpha < 1. This is the most complex region of code to cover,
# as there are multiple if-else statements. Let's take a look at the
# source code, and determine the values that we need accordingly:
#
# while 1:
# u = random()
# b = (_e + alpha)/_e
# p = b*u
# if p <= 1.0: # <=== (A)
# x = p ** (1.0/alpha)
# else: # <=== (B)
# x = -_log((b-p)/alpha)
# u1 = random()
# if p > 1.0: # <=== (C)
# if u1 <= x ** (alpha - 1.0): # <=== (D)
# break
# elif u1 <= _exp(-x): # <=== (E)
# break
# return x * beta
#
# First, we want (A) to be True. For that we need that:
# b*random() <= 1.0
# r1 = random() <= 1.0 / b
#
# We now get to the second if-else branch, and here, since p <= 1.0,
# (C) is False and we take the elif branch, (E). For it to be True,
# so that the break is executed, we need that:
# r2 = random() <= _exp(-x)
# r2 <= _exp(-(p ** (1.0/alpha)))
# r2 <= _exp(-((b*r1) ** (1.0/alpha)))
_e = random._e
_exp = random._exp
_log = random._log
alpha = 0.35
beta = 1.45
b = (_e + alpha)/_e
epsilon = 0.01
r1 = 0.8859296441566 # 1.0 / b
r2 = 0.3678794411714 # _exp(-((b*r1) ** (1.0/alpha)))
# These four "random" values result in the following trace:
# (A) True, (E) False --> [next iteration of while]
# (A) True, (E) True --> [while loop breaks]
random_mock.side_effect = [r1, r2 + epsilon, r1, r2]
returned_value = random.gammavariate(alpha, beta)
self.assertAlmostEqual(returned_value, 1.4499999999997544)
# Let's now make (A) be False. If this is the case, when we get to the
# second if-else 'p' is greater than 1, so (C) evaluates to True. We
# now encounter a second if statement, (D), which in order to execute
# must satisfy the following condition:
# r2 <= x ** (alpha - 1.0)
# r2 <= (-_log((b-p)/alpha)) ** (alpha - 1.0)
# r2 <= (-_log((b-(b*r1))/alpha)) ** (alpha - 1.0)
r1 = 0.8959296441566 # (1.0 / b) + epsilon -- so that (A) is False
r2 = 0.9445400408898141
# And these four values result in the following trace:
# (B) and (C) True, (D) False --> [next iteration of while]
# (B) and (C) True, (D) True [while loop breaks]
random_mock.side_effect = [r1, r2 + epsilon, r1, r2]
returned_value = random.gammavariate(alpha, beta)
self.assertAlmostEqual(returned_value, 1.5830349561760781)
@unittest.mock.patch('random.Random.gammavariate')
def test_betavariate_return_zero(self, gammavariate_mock):
# betavariate() returns zero when the Gamma distribution
# that it uses internally returns this same value.
gammavariate_mock.return_value = 0.0
self.assertEqual(0.0, random.betavariate(2.71828, 3.14159))
class TestModule(unittest.TestCase):
def testMagicConstants(self):
self.assertAlmostEqual(random.NV_MAGICCONST, 1.71552776992141)
self.assertAlmostEqual(random.TWOPI, 6.28318530718)
self.assertAlmostEqual(random.LOG4, 1.38629436111989)
self.assertAlmostEqual(random.SG_MAGICCONST, 2.50407739677627)
def test__all__(self):
# tests validity but not completeness of the __all__ list
self.assertTrue(set(random.__all__) <= set(dir(random)))
def test_random_subclass_with_kwargs(self):
# SF bug #1486663 -- this used to erroneously raise a TypeError
class Subclass(random.Random):
def __init__(self, newarg=None):
random.Random.__init__(self)
Subclass(newarg=1)
if __name__ == "__main__":
unittest.main()
| jiangzhuo/kbengine | kbe/src/lib/python/Lib/test/test_random.py | Python | lgpl-3.0 | 31,638 | 0.002497 |
from django.forms.widgets import TextInput
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.safestring import mark_safe
class TagAutocompleteTagIt(TextInput):
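    """Text input widget that drives the jQuery Tag-it autocomplete plugin for tag fields."""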
def __init__(self, max_tags, *args, **kwargs):
self.max_tags = (
max_tags
if max_tags
else getattr(settings, "TAGGING_AUTOCOMPLETE_MAX_TAGS", 20)
)
super(TagAutocompleteTagIt, self).__init__(*args, **kwargs)
def render(self, name, value, attrs=None):
""" Render HTML code """
# django-tagging
        case_sensitive = (
            "true" if not getattr(settings, "FORCE_LOWERCASE_TAGS", False) else "false"
        )
        max_tag_length = getattr(settings, "MAX_TAG_LENGTH", 50)
# django-tagging-autocomplete-tagit
autocomplete_min_length = getattr(
settings, "TAGGING_AUTOCOMPLETE_MIN_LENGTH", 1
)
remove_confirmation = (
"true"
if getattr(settings, "TAGGING_AUTOCOMPLETE_REMOVE_CONFIRMATION", True)
else "false"
)
animate = (
"true"
if getattr(settings, "TAGGING_AUTOCOMPLETE_ANIMATE", True)
else "false"
)
list_view = reverse("ac_tagging-list")
html = super(TagAutocompleteTagIt, self).render(name, value, attrs)
# Subclass this field in case you need to add some custom behaviour like custom callbacks
# js = u"""<script type="text/javascript">
# $(document).ready(function() {
# init_jQueryTagit({
# objectId: '%s',
# sourceUrl: '%s',
# fieldName: '%s',
# minLength: %s,
# removeConfirmation: %s,
# caseSensitive: %s,
# animate: %s,
# maxLength: %s,
# maxTags: %s,
# //onTagAdded : ac_tagginc_clean,
# //onTagRemoved: ac_tagginc_clean,
# onTagClicked: null,
# onMaxTagsExceeded: null,
# })
# });
# </script>""" % (attrs['id'], list_view, name, autocomplete_min_length, remove_confirmation, case_sensitive,
        # animate, max_tag_length, self.max_tags)
js = ""
return mark_safe("\n".join([html, js]))
class Media:
# JS Base url defaults to STATIC_URL/jquery-autocomplete/
js_base_url = getattr(
settings,
"TAGGING_AUTOCOMPLETE_JS_BASE_URL",
"%sjs/jquery-tag-it/" % settings.STATIC_URL,
)
# jQuery ui is loaded from google's CDN by default
jqueryui_default = (
"https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.12/jquery-ui.min.js"
)
jqueryui_file = getattr(
settings, "TAGGING_AUTOCOMPLETE_JQUERY_UI_FILE", jqueryui_default
)
# if a custom jquery ui file has been specified
if jqueryui_file != jqueryui_default:
# determine path
jqueryui_file = "%s%s" % (js_base_url, jqueryui_file)
# load js
js = (
"%sac_tagging.js" % js_base_url,
jqueryui_file,
"%sjquery.tag-it.js" % js_base_url,
)
# custom css can also be overriden in settings
css_list = getattr(
settings,
"TAGGING_AUTOCOMPLETE_CSS",
["%scss/ui-autocomplete-tag-it.css" % js_base_url],
)
        # make sure css_list is a list; a single string is wrapped in a list
        if isinstance(css_list, str):
            css_list = [css_list]
css = {"screen": css_list}
def _format_value(self, value):
return value.replace(",", ", ")
def value_from_datadict(self, data, files, name):
current_value = data.get(name, None)
if current_value and current_value[-1] != ",":
current_value = u"%s," % current_value
# current_value = u'"%s"' % current_value
# current_value = u'%s' % current_value
return current_value
| hzlf/openbroadcast.org | website/apps/ac_tagging/widgets.py | Python | gpl-3.0 | 4,147 | 0.001206 |
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from apps.grid.fields import TitleField, UserModelChoiceField
from apps.grid.widgets import CommentInput
from .base_form import BaseForm
class DealActionCommentForm(BaseForm):
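    """Meta-info form for a deal: action comment, public/not-public status, import history and feedback assignment."""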
exclude_in_export = (
"tg_action_comment",
"source",
"id",
"assign_to_user",
"tg_feedback_comment",
"fully_updated",
)
NOT_PUBLIC_REASON_CHOICES = (
("", _("---------")),
(
"Temporary removal from PI after criticism",
_("Temporary removal from PI after criticism"),
),
("Research in progress", _("Research in progress")),
("Land Observatory Import", _("Land Observatory Import")),
)
form_title = _("Meta info")
tg_action = TitleField(required=False, label="", initial=_("Fully updated"))
tg_action_comment = forms.CharField(
required=True, label=_("Action comment"), widget=CommentInput
)
fully_updated = forms.BooleanField(required=False, label=_("Fully updated"))
# fully_updated_history = forms.CharField(
# required=False, label=_("Fully updated history"),
# widget=forms.Textarea(attrs={"readonly":True, "cols": 80, "rows": 5}))
tg_not_public = TitleField(required=False, label="", initial=_("Public deal"))
not_public = forms.BooleanField(
required=False,
label=_("Not public"),
help_text=_("Please specify in additional comment field"),
)
not_public_reason = forms.ChoiceField(
required=False, label=_("Reason"), choices=NOT_PUBLIC_REASON_CHOICES
)
tg_not_public_comment = forms.CharField(
required=False, label=_("Comment on not public"), widget=CommentInput
)
tg_imported = TitleField(required=False, label="", initial=_("Import history"))
# source = forms.CharField(
# required=False, label=_("Import source"),
# widget=forms.TextInput(attrs={'readonly': True}))
previous_identifier = forms.CharField(
required=False,
label=_("Previous identifier"),
widget=forms.TextInput(attrs={"size": "64", "readonly": True}),
)
tg_feedback = TitleField(required=False, label="", initial=_("Feedback"))
assign_to_user = UserModelChoiceField(
required=False,
label=_("Assign to"),
queryset=get_user_model().objects.none(),
empty_label=_("Unassigned"),
)
tg_feedback_comment = forms.CharField(
required=False, label=_("Feedback comment"), widget=CommentInput
)
class Meta:
name = "action_comment"
def __init__(self, *args, **kwargs):
super(DealActionCommentForm, self).__init__(*args, **kwargs)
self.fields["assign_to_user"].queryset = (
get_user_model()
.objects.filter(
is_active=True, groups__name__in=("Editors", "Administrators")
)
.order_by("first_name", "last_name")
)
def get_attributes(self, request=None):
# Remove action comment, this field is handled separately in DealBaseView
attributes = super(DealActionCommentForm, self).get_attributes(request)
del attributes["tg_action_comment"]
return attributes
@classmethod
def get_data(cls, activity, group=None, prefix=""):
data = super().get_data(activity, group, prefix)
# Remove action comment, due to an old bug it seems to exist as an attribute too
if "tg_action_comment" in data:
del data["tg_action_comment"]
# Get action comment
data["tg_action_comment"] = activity.comment or ""
return data
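# Illustrative sketch (not part of the original module): how the form's helpers
# might be used. "activity" stands for a hypothetical activity instance with a
# `comment` attribute, assumed only for the example.
#
#   form = DealActionCommentForm()
#   # form.fields["assign_to_user"].queryset is limited to active Editors/Administrators
#   initial = DealActionCommentForm.get_data(activity)
#   # initial["tg_action_comment"] is taken from activity.comment, not from attributes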
| sinnwerkstatt/landmatrix | apps/grid/forms/deal_action_comment_form.py | Python | agpl-3.0 | 3,733 | 0.001607 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustäbel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
# $Source$
version = "0.9.0"
__author__ = "Lars Gustäbel (lars@gustaebel.de)"
__date__ = "$Date$"
__cvsid__ = "$Id$"
__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import shutil
import stat
import errno
import time
import struct
import copy
import re
import operator
try:
import grp, pwd
except ImportError:
grp = pwd = None
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = "\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = "ustar \0" # magic gnu tar string
POSIX_MAGIC = "ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = "0" # regular file
AREGTYPE = "\0" # regular file
LNKTYPE = "1" # link (inside tarfile)
SYMTYPE = "2" # symbolic link
CHRTYPE = "3" # character special device
BLKTYPE = "4" # block special device
DIRTYPE = "5" # directory
FIFOTYPE = "6" # fifo special device
CONTTYPE = "7" # contiguous file
GNUTYPE_LONGNAME = "L" # GNU tar longname
GNUTYPE_LONGLINK = "K" # GNU tar longlink
GNUTYPE_SPARSE = "S" # GNU tar sparse file
XHDTYPE = "x" # POSIX.1-2001 extended header
XGLTYPE = "g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = "X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0120000 # symbolic link
S_IFREG = 0100000 # regular file
S_IFBLK = 0060000 # block device
S_IFDIR = 0040000 # directory
S_IFCHR = 0020000 # character device
S_IFIFO = 0010000 # fifo
TSUID = 04000 # set UID on execution
TSGID = 02000 # set GID on execution
TSVTX = 01000 # reserved
TUREAD = 0400 # read by owner
TUWRITE = 0200 # write by owner
TUEXEC = 0100 # execute/search by owner
TGREAD = 0040 # read by group
TGWRITE = 0020 # write by group
TGEXEC = 0010 # execute/search by group
TOREAD = 0004 # read by other
TOWRITE = 0002 # write by other
TOEXEC = 0001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
ENCODING = sys.getfilesystemencoding()
if ENCODING is None:
ENCODING = sys.getdefaultencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length):
"""Convert a python string to a null-terminated string buffer.
"""
return s[:length] + (length - len(s)) * NUL
def nts(s):
"""Convert a null-terminated string field to a python string.
"""
# Use the string up to the first null char.
p = s.find("\0")
if p == -1:
return s
return s[:p]
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0200):
try:
n = int(nts(s) or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0L
for i in xrange(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = "%0*o" % (digits - 1, n) + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = ""
for i in xrange(digits - 1):
s = chr(n & 0377) + s
n >>= 8
s = chr(0200) + s
return s
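# Illustrative round-trip example (not part of the original module), assuming
# the two conversion functions above:
#
#   itn(834)              # -> "0001502\x00"  (7 octal digits + NUL, digits=8)
#   nti("0001502\x00")    # -> 834
#
# Values >= 8**(digits-1) only fit in GNU format and use the base-256
# encoding introduced by a leading \x80 byte.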
def uts(s, encoding, errors):
"""Convert a unicode object to a string.
"""
if errors == "utf-8":
# An extra error handler similar to the -o invalid=UTF-8 option
# in POSIX.1-2001. Replace untranslatable characters with their
# UTF-8 representation.
try:
return s.encode(encoding, "strict")
except UnicodeEncodeError:
x = []
for c in s:
try:
x.append(c.encode(encoding, "strict"))
except UnicodeEncodeError:
x.append(c.encode("utf8"))
return "".join(x)
else:
return s.encode(encoding, errors)
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
shutil.copyfileobj(src, dst)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in xrange(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadble tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream:
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = ""
self.pos = 0L
self.closed = False
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32("") & 0xffffffffL
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = ""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", long(time.time()))
self.__write("\037\213\010\010%s\002\377" % timestamp)
if self.name.endswith(".gz"):
self.name = self.name[:-3]
self.__write(self.name + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc) & 0xffffffffL
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = ""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffffL))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = ""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != "\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != "\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in xrange(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
t = [self.dbuf]
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
t.append(buf)
c += len(buf)
t = "".join(t)
self.dbuf = t[size:]
return t[:size]
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
t = [self.buf]
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
t.append(buf)
c += len(buf)
t = "".join(t)
self.buf = t[size:]
return t[:size]
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith("\037\213\010"):
return "gz"
if self.buf.startswith("BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = ""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
b = [self.buf]
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
b.append(data)
x += len(data)
self.buf = "".join(b)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, sparse=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.sparse = sparse
self.position = 0
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
if self.sparse is None:
return self.readnormal(size)
else:
return self.readsparse(size)
def readnormal(self, size):
"""Read operation for regular files.
"""
self.fileobj.seek(self.offset + self.position)
self.position += size
return self.fileobj.read(size)
def readsparse(self, size):
"""Read operation for sparse files.
"""
data = []
while size > 0:
buf = self.readsparsesection(size)
if not buf:
break
size -= len(buf)
data.append(buf)
return "".join(data)
def readsparsesection(self, size):
"""Read a single section of a sparse file.
"""
section = self.sparse.find(self.position)
if section is None:
return ""
size = min(size, section.offset + section.size - self.position)
if isinstance(section, _data):
realpos = section.realpos + self.position - section.offset
self.fileobj.seek(self.offset + realpos)
self.position += size
return self.fileobj.read(size)
else:
self.position += size
return NUL * size
#class _FileInFile
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
getattr(tarinfo, "sparse", None))
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = ""
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = ""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = ""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if "\n" in self.buffer:
pos = self.buffer.find("\n") + 1
else:
buffers = [self.buffer]
while True:
buf = self.fileobj.read(self.blocksize)
buffers.append(buf)
if not buf or "\n" in buf:
self.buffer = "".join(buffers)
pos = self.buffer.find("\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = ""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self, encoding, errors):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 07777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
for key in ("name", "linkname", "uname", "gname"):
if type(info[key]) is unicode:
info[key] = info[key].encode(encoding, errors)
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="strict"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info(encoding, errors)
if format == USTAR_FORMAT:
return self.create_ustar_header(info)
elif format == GNU_FORMAT:
return self.create_gnu_header(info)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding, errors)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT)
def create_gnu_header(self, info):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = ""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME)
return buf + self._create_header(info, GNU_FORMAT)
def create_pax_header(self, info, encoding, errors):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
val = info[name].decode(encoding, errors)
# Try to encode the string as ASCII.
try:
val.encode("ascii")
except UnicodeEncodeError:
pax_headers[hname] = val
continue
if len(info[name]) > length:
pax_headers[hname] = val
        # Test number fields for values that exceed the field limit or that
        # need to be stored as floats.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = unicode(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers)
else:
buf = ""
return buf + self._create_header(info, USTAR_FORMAT)
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, type=XGLTYPE)
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100),
itn(info.get("mode", 0) & 07777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100),
stn(info.get("magic", POSIX_MAGIC), 8),
stn(info.get("uname", ""), 32),
stn(info.get("gname", ""), 32),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155)
]
buf = struct.pack("%ds" % BLOCKSIZE, "".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name += NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE):
"""Return a POSIX.1-2001 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be unicode objects.
"""
records = []
for keyword, value in pax_headers.iteritems():
keyword = keyword.encode("utf8")
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records.append("%d %s=%s\n" % (p, keyword, value))
records = "".join(records)
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf):
"""Construct a TarInfo object from a 512 byte string buffer.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.buf = buf
obj.name = nts(buf[0:100])
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257])
obj.uname = nts(buf[265:297])
obj.gname = nts(buf[297:329])
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500])
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
buf = self.buf
sp = _ringbuffer()
pos = 386
lastpos = 0L
realpos = 0L
# There are 4 possible sparse structs in the
# first header.
for i in xrange(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[482])
origsize = nti(buf[483:495])
# If the isextended flag is given,
# there are extra headers to process.
while isextended == 1:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in xrange(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[504])
if lastpos < origsize:
sp.append(_hole(lastpos, origsize - lastpos))
self.sparse = sp
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2001.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
        # Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
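        # Illustrative example (not from the original source): the record
        #   "18 path=some/file\n"
        # has total length 18, keyword "path" and value "some/file".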
regex = re.compile(r"(\d+) ([^=]+)=", re.U)
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
keyword = keyword.decode("utf8")
value = value.decode("utf8")
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.iteritems():
if keyword not in PAX_FIELDS:
continue
if keyword == "path":
value = value.rstrip("/")
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
else:
value = uts(value, encoding, errors)
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.type == GNUTYPE_SPARSE
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
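# Illustrative sketch (not part of the original module): building a header
# block for an in-memory member, assuming the class above:
#
#   ti = TarInfo("hello.txt")
#   ti.size = 5
#   ti.mtime = 0
#   header = ti.tobuf()   # one or more 512-byte blocks (GNU format by default)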
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors=None, pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
if errors is not None:
self.errors = errors
elif mode == "r":
self.errors = "utf-8"
else:
self.errors = "strict"
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError, e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def _getposix(self):
return self.format == USTAR_FORMAT
def _setposix(self, value):
import warnings
warnings.warn("use the format attribute instead", DeprecationWarning,
2)
if value:
self.format = USTAR_FORMAT
else:
self.format = GNU_FORMAT
posix = property(_getposix, _setposix)
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError), e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
t = cls(name, filemode,
_Stream(name, filemode, comptype, fileobj, bufsize),
**kwargs)
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
try:
t = cls.taropen(name, mode,
gzip.GzipFile(name, mode + "b", compresslevel, fileobj),
**kwargs)
except IOError:
raise ReadError("not a gzip file")
t._extfileobj = False
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes,
# Absolute paths are turned to relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
                # The inode is added only if it is valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0L
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print filemode(tarinfo.mode),
print "%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid),
if tarinfo.ischr() or tarinfo.isblk():
print "%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)),
else:
print "%10d" % tarinfo.size,
print "%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6],
print tarinfo.name + ("/" if tarinfo.isdir() else ""),
if verbose:
if tarinfo.issym():
print "->", tarinfo.linkname,
if tarinfo.islnk():
print "link to", tarinfo.linkname,
print
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0700
self.extract(tarinfo, path)
# Reverse sort directories.
directories.sort(key=operator.attrgetter('name'))
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError, e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path=""):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'.
"""
self._check("r")
if isinstance(member, basestring):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
except EnvironmentError, e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError, e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, basestring):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0700)
except EnvironmentError, e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.extractfile(tarinfo)
target = bltn_open(targetpath, "wb")
copyfileobj(source, target)
source.close()
target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
if hasattr(os, "symlink") and hasattr(os, "link"):
# For systems that support symbolic and hard links.
if tarinfo.issym():
if os.path.lexists(targetpath):
os.unlink(targetpath)
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
if os.path.lexists(targetpath):
os.unlink(targetpath)
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo), targetpath)
else:
try:
self._extract_member(self._find_link_target(tarinfo), targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
try:
g = grp.getgrgid(tarinfo.gid)[2]
except KeyError:
g = os.getgid()
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
try:
u = pwd.getpwuid(tarinfo.uid)[2]
except KeyError:
u = os.getuid()
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError, e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError, e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError, e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError, e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError, e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError, e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError, e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print >> sys.stderr, msg
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter:
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def next(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
# Helper classes for sparse file support
class _section:
"""Base class for _data and _hole.
"""
def __init__(self, offset, size):
self.offset = offset
self.size = size
def __contains__(self, offset):
return self.offset <= offset < self.offset + self.size
class _data(_section):
"""Represent a data section in a sparse file.
"""
def __init__(self, offset, size, realpos):
_section.__init__(self, offset, size)
self.realpos = realpos
class _hole(_section):
"""Represent a hole section in a sparse file.
"""
pass
class _ringbuffer(list):
"""Ringbuffer class which increases performance
over a regular list.
"""
def __init__(self):
self.idx = 0
def find(self, offset):
idx = self.idx
while True:
item = self[idx]
if offset in item:
break
idx += 1
if idx == len(self):
idx = 0
if idx == self.idx:
# End of File
return None
self.idx = idx
return item
#---------------------------------------------
# zipfile compatible TarFile class
#---------------------------------------------
TAR_PLAIN = 0 # zipfile.ZIP_STORED
TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED
class TarFileCompat:
"""TarFile class compatible with standard module zipfile's
ZipFile class.
"""
def __init__(self, file, mode="r", compression=TAR_PLAIN):
from warnings import warnpy3k
warnpy3k("the TarFileCompat class has been removed in Python 3.0",
stacklevel=2)
if compression == TAR_PLAIN:
self.tarfile = TarFile.taropen(file, mode)
elif compression == TAR_GZIPPED:
self.tarfile = TarFile.gzopen(file, mode)
else:
raise ValueError("unknown compression constant")
if mode[0:1] == "r":
members = self.tarfile.getmembers()
for m in members:
m.filename = m.name
m.file_size = m.size
m.date_time = time.gmtime(m.mtime)[:6]
def namelist(self):
return map(lambda m: m.name, self.infolist())
def infolist(self):
return filter(lambda m: m.type in REGULAR_TYPES,
self.tarfile.getmembers())
def printdir(self):
self.tarfile.list()
def testzip(self):
return
def getinfo(self, name):
return self.tarfile.getmember(name)
def read(self, name):
return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
def write(self, filename, arcname=None, compress_type=None):
self.tarfile.add(filename, arcname)
def writestr(self, zinfo, bytes):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import calendar
tinfo = TarInfo(zinfo.filename)
tinfo.size = len(bytes)
tinfo.mtime = calendar.timegm(zinfo.date_time)
self.tarfile.addfile(tinfo, StringIO(bytes))
def close(self):
self.tarfile.close()
#class TarFileCompat
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
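# Illustrative usage sketch (not part of the original module). The archive and
# directory names below are placeholders; note that at this point `open` refers
# to TarFile.open, not the builtin (the builtin was saved above as bltn_open).
def _example_roundtrip(archive="example.tar.gz", source_dir="some_directory", dest="."):
    # Writing: add() walks directories recursively; addfile() would append a
    # single member described by a TarInfo object.
    tar = open(archive, "w:gz")
    try:
        tar.add(source_dir)
    finally:
        tar.close()
    # Reading: extractall() extracts every member, then fixes up directory
    # owner/mtime/mode afterwards (see the method above).
    if is_tarfile(archive):
        tar = open(archive)
        try:
            tar.extractall(path=dest)
        finally:
            tar.close()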
| microdee/IronHydra | src/IronHydra/Lib/tarfile.py | Python | mit | 88,997 | 0.001888 |
"""config_test.py - test config module
RMM, 25 may 2019
This test suite checks the functionality of the config module
"""
from math import pi, log10
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup as mplcleanup
import numpy as np
import pytest
import control as ct
@pytest.mark.usefixtures("editsdefaults")  # makes sure to reset the defaults to the test configuration
class TestConfig:
# Create a simple second order system to use for testing
sys = ct.tf([10], [1, 2, 1])
def test_set_defaults(self):
ct.config.set_defaults('config', test1=1, test2=2, test3=None)
assert ct.config.defaults['config.test1'] == 1
assert ct.config.defaults['config.test2'] == 2
assert ct.config.defaults['config.test3'] is None
@mplcleanup
def test_get_param(self):
assert ct.config._get_param('freqplot', 'dB')\
== ct.config.defaults['freqplot.dB']
assert ct.config._get_param('freqplot', 'dB', 1) == 1
ct.config.defaults['config.test1'] = 1
assert ct.config._get_param('config', 'test1', None) == 1
assert ct.config._get_param('config', 'test1', None, 1) == 1
ct.config.defaults['config.test3'] = None
assert ct.config._get_param('config', 'test3') is None
assert ct.config._get_param('config', 'test3', 1) == 1
assert ct.config._get_param('config', 'test3', None, 1) is None
assert ct.config._get_param('config', 'test4') is None
assert ct.config._get_param('config', 'test4', 1) == 1
assert ct.config._get_param('config', 'test4', 2, 1) == 2
assert ct.config._get_param('config', 'test4', None, 3) == 3
assert ct.config._get_param('config', 'test4', {'test4': 1}, None) == 1
def test_default_deprecation(self):
ct.config.defaults['deprecated.config.oldkey'] = 'config.newkey'
ct.config.defaults['deprecated.config.oldmiss'] = 'config.newmiss'
msgpattern = r'config\.oldkey.* has been renamed to .*config\.newkey'
ct.config.defaults['config.newkey'] = 1
with pytest.warns(FutureWarning, match=msgpattern):
assert ct.config.defaults['config.oldkey'] == 1
with pytest.warns(FutureWarning, match=msgpattern):
ct.config.defaults['config.oldkey'] = 2
with pytest.warns(FutureWarning, match=msgpattern):
assert ct.config.defaults['config.oldkey'] == 2
assert ct.config.defaults['config.newkey'] == 2
ct.config.set_defaults('config', newkey=3)
with pytest.warns(FutureWarning, match=msgpattern):
assert ct.config._get_param('config', 'oldkey') == 3
with pytest.warns(FutureWarning, match=msgpattern):
ct.config.set_defaults('config', oldkey=4)
with pytest.warns(FutureWarning, match=msgpattern):
assert ct.config.defaults['config.oldkey'] == 4
assert ct.config.defaults['config.newkey'] == 4
ct.config.defaults.update({'config.newkey': 5})
with pytest.warns(FutureWarning, match=msgpattern):
ct.config.defaults.update({'config.oldkey': 6})
with pytest.warns(FutureWarning, match=msgpattern):
assert ct.config.defaults.get('config.oldkey') == 6
with pytest.raises(KeyError):
with pytest.warns(FutureWarning, match=msgpattern):
ct.config.defaults['config.oldmiss']
with pytest.raises(KeyError):
ct.config.defaults['config.neverdefined']
# assert that reset defaults keeps the custom type
ct.config.reset_defaults()
with pytest.warns(FutureWarning,
match='bode.* has been renamed to.*freqplot'):
assert ct.config.defaults['bode.Hz'] \
== ct.config.defaults['freqplot.Hz']
@mplcleanup
def test_fbs_bode(self):
ct.use_fbs_defaults()
# Generate a Bode plot
plt.figure()
omega = np.logspace(-3, 3, 100)
ct.bode_plot(self.sys, omega)
# Get the magnitude line
mag_axis = plt.gcf().axes[0]
mag_line = mag_axis.get_lines()
mag_data = mag_line[0].get_data()
mag_x, mag_y = mag_data
# Make sure the x-axis is in rad/sec and y-axis is in natural units
np.testing.assert_almost_equal(mag_x[0], 0.001, decimal=6)
np.testing.assert_almost_equal(mag_y[0], 10, decimal=3)
# Get the phase line
phase_axis = plt.gcf().axes[1]
phase_line = phase_axis.get_lines()
phase_data = phase_line[0].get_data()
phase_x, phase_y = phase_data
# Make sure the x-axis is in rad/sec and y-axis is in degrees
np.testing.assert_almost_equal(phase_x[-1], 1000, decimal=0)
np.testing.assert_almost_equal(phase_y[-1], -180, decimal=0)
# Override the defaults and make sure that works as well
plt.figure()
ct.bode_plot(self.sys, omega, dB=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_y[0], 20*log10(10), decimal=3)
plt.figure()
ct.bode_plot(self.sys, omega, Hz=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_x[0], 0.001 / (2*pi), decimal=6)
plt.figure()
ct.bode_plot(self.sys, omega, deg=False)
phase_x, phase_y = (((plt.gcf().axes[1]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(phase_y[-1], -pi, decimal=2)
@mplcleanup
def test_matlab_bode(self):
ct.use_matlab_defaults()
# Generate a Bode plot
plt.figure()
omega = np.logspace(-3, 3, 100)
ct.bode_plot(self.sys, omega)
# Get the magnitude line
mag_axis = plt.gcf().axes[0]
mag_line = mag_axis.get_lines()
mag_data = mag_line[0].get_data()
mag_x, mag_y = mag_data
# Make sure the x-axis is in rad/sec and y-axis is in dB
np.testing.assert_almost_equal(mag_x[0], 0.001, decimal=6)
np.testing.assert_almost_equal(mag_y[0], 20*log10(10), decimal=3)
# Get the phase line
phase_axis = plt.gcf().axes[1]
phase_line = phase_axis.get_lines()
phase_data = phase_line[0].get_data()
phase_x, phase_y = phase_data
# Make sure the x-axis is in rad/sec and y-axis is in degrees
np.testing.assert_almost_equal(phase_x[-1], 1000, decimal=1)
np.testing.assert_almost_equal(phase_y[-1], -180, decimal=0)
# Override the defaults and make sure that works as well
plt.figure()
ct.bode_plot(self.sys, omega, dB=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_y[0], 20*log10(10), decimal=3)
plt.figure()
ct.bode_plot(self.sys, omega, Hz=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_x[0], 0.001 / (2*pi), decimal=6)
plt.figure()
ct.bode_plot(self.sys, omega, deg=False)
phase_x, phase_y = (((plt.gcf().axes[1]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(phase_y[-1], -pi, decimal=2)
@mplcleanup
def test_custom_bode_default(self):
ct.config.defaults['freqplot.dB'] = True
ct.config.defaults['freqplot.deg'] = True
ct.config.defaults['freqplot.Hz'] = True
# Generate a Bode plot
plt.figure()
omega = np.logspace(-3, 3, 100)
ct.bode_plot(self.sys, omega, dB=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_y[0], 20*log10(10), decimal=3)
# Override defaults
plt.figure()
ct.bode_plot(self.sys, omega, Hz=True, deg=False, dB=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
phase_x, phase_y = (((plt.gcf().axes[1]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_x[0], 0.001 / (2*pi), decimal=6)
np.testing.assert_almost_equal(mag_y[0], 20*log10(10), decimal=3)
np.testing.assert_almost_equal(phase_y[-1], -pi, decimal=2)
@mplcleanup
def test_bode_number_of_samples(self):
# Set the number of samples (default is 50, from np.logspace)
mag_ret, phase_ret, omega_ret = ct.bode_plot(self.sys, omega_num=87)
assert len(mag_ret) == 87
# Change the default number of samples
ct.config.defaults['freqplot.number_of_samples'] = 76
mag_ret, phase_ret, omega_ret = ct.bode_plot(self.sys)
assert len(mag_ret) == 76
# Override the default number of samples
mag_ret, phase_ret, omega_ret = ct.bode_plot(self.sys, omega_num=87)
assert len(mag_ret) == 87
@mplcleanup
def test_bode_feature_periphery_decade(self):
# Generate a sample Bode plot to figure out the range it uses
ct.reset_defaults() # Make sure starting state is correct
mag_ret, phase_ret, omega_ret = ct.bode_plot(self.sys, Hz=False)
omega_min, omega_max = omega_ret[[0, -1]]
# Reset the periphery decade value (should add one decade on each end)
ct.config.defaults['freqplot.feature_periphery_decades'] = 2
mag_ret, phase_ret, omega_ret = ct.bode_plot(self.sys, Hz=False)
np.testing.assert_almost_equal(omega_ret[0], omega_min/10)
np.testing.assert_almost_equal(omega_ret[-1], omega_max * 10)
# Make sure it also works in rad/sec, in opposite direction
mag_ret, phase_ret, omega_ret = ct.bode_plot(self.sys, Hz=True)
omega_min, omega_max = omega_ret[[0, -1]]
ct.config.defaults['freqplot.feature_periphery_decades'] = 1
mag_ret, phase_ret, omega_ret = ct.bode_plot(self.sys, Hz=True)
np.testing.assert_almost_equal(omega_ret[0], omega_min*10)
np.testing.assert_almost_equal(omega_ret[-1], omega_max/10)
def test_reset_defaults(self):
ct.use_matlab_defaults()
ct.reset_defaults()
assert not ct.config.defaults['freqplot.dB']
assert ct.config.defaults['freqplot.deg']
assert not ct.config.defaults['freqplot.Hz']
assert ct.config.defaults['freqplot.number_of_samples'] == 1000
assert ct.config.defaults['freqplot.feature_periphery_decades'] == 1.0
def test_legacy_defaults(self):
with pytest.deprecated_call():
ct.use_legacy_defaults('0.8.3')
assert(isinstance(ct.ss(0, 0, 0, 1).D, np.matrix))
ct.reset_defaults()
assert isinstance(ct.ss(0, 0, 0, 1).D, np.ndarray)
assert not isinstance(ct.ss(0, 0, 0, 1).D, np.matrix)
ct.use_legacy_defaults('0.8.4')
assert ct.config.defaults['forced_response.return_x'] is True
ct.use_legacy_defaults('0.9.0')
assert isinstance(ct.ss(0, 0, 0, 1).D, np.ndarray)
assert not isinstance(ct.ss(0, 0, 0, 1).D, np.matrix)
# test that old versions don't raise a problem
ct.use_legacy_defaults('REL-0.1')
ct.use_legacy_defaults('control-0.3a')
ct.use_legacy_defaults('0.6c')
ct.use_legacy_defaults('0.8.2')
ct.use_legacy_defaults('0.1')
# Make sure that nonsense versions generate an error
with pytest.raises(ValueError):
ct.use_legacy_defaults("a.b.c")
with pytest.raises(ValueError):
ct.use_legacy_defaults("1.x.3")
@pytest.mark.parametrize("dt", [0, None])
def test_change_default_dt(self, dt):
"""Test that system with dynamics uses correct default dt"""
ct.set_defaults('control', default_dt=dt)
assert ct.ss(1, 0, 0, 1).dt == dt
assert ct.tf(1, [1, 1]).dt == dt
nlsys = ct.iosys.NonlinearIOSystem(
lambda t, x, u: u * x * x,
lambda t, x, u: x, inputs=1, outputs=1)
assert nlsys.dt == dt
def test_change_default_dt_static(self):
"""Test that static gain systems always have dt=None"""
ct.set_defaults('control', default_dt=0)
assert ct.tf(1, 1).dt is None
assert ct.ss(0, 0, 0, 1).dt is None
# TODO: add in test for static gain iosys
def test_get_param_last(self):
"""Test _get_param last keyword"""
kwargs = {'first': 1, 'second': 2}
with pytest.raises(TypeError, match="unrecognized keyword.*second"):
assert ct.config._get_param(
'config', 'first', kwargs, pop=True, last=True) == 1
assert ct.config._get_param(
'config', 'second', kwargs, pop=True, last=True) == 2
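# Illustrative sketch (not a test): the defaults pattern exercised by the suite
# above, as it might appear in user code.
def _example_user_code():
    ct.set_defaults('freqplot', dB=True, Hz=True)   # tweak plotting defaults
    try:
        ct.bode_plot(ct.tf([10], [1, 2, 1]))        # subsequent plots pick them up
    finally:
        ct.reset_defaults()                         # restore package defaults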
| python-control/python-control | control/tests/config_test.py | Python | bsd-3-clause | 12,736 | 0.000157 |
# Generated by Django 2.2.4 on 2019-08-26 18:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('helpdesk', '0027_auto_20190826_0700'),
]
operations = [
migrations.AlterField(
model_name='followup',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='kbitem',
name='voted_by',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='queue',
name='default_owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='default_owner', to=settings.AUTH_USER_MODEL, verbose_name='Default owner'),
),
migrations.AlterField(
model_name='savedsearch',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='ticket',
name='assigned_to',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assigned_to', to=settings.AUTH_USER_MODEL, verbose_name='Assigned to'),
),
migrations.AlterField(
model_name='ticketcc',
name='user',
field=models.ForeignKey(blank=True, help_text='User who wishes to receive updates for this ticket.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='usersettings',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='usersettings_helpdesk', to=settings.AUTH_USER_MODEL),
),
]
| auto-mat/klub | local_migrations/migrations_helpdesk/0028_auto_20190826_2034.py | Python | gpl-3.0 | 2,149 | 0.002792 |
# coding: utf8
"""
Delphi decision maker
"""
module = "delphi"
if deployment_settings.has_module(module):
########
# Groups
########
resourcename = "group"
tablename = module + "_" + resourcename
table = db.define_table(tablename, timestamp,
Field("name", notnull=True),
Field("description", "text"),
Field("active", "boolean", default=True),
migrate=migrate)
table.name.label = T("Group Title")
table.name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, "delphi_group.name")]
# CRUD Strings
ADD_GROUP = T("Add Group")
LIST_GROUPS = T("List Groups")
s3.crud_strings[tablename] = Storage(
title_create = ADD_GROUP,
title_display = T("Group Details"),
title_list = LIST_GROUPS,
title_update = T("Edit Group"),
title_search = T("Search Groups"),
subtitle_create = T("Add New Group"),
subtitle_list = T("Groups"),
label_list_button = LIST_GROUPS,
label_create_button = ADD_GROUP,
msg_record_created = T("Group added"),
msg_record_modified = T("Group updated"),
msg_record_deleted = T("Group deleted"),
msg_list_empty = T("No Groups currently defined"))
s3xrc.model.configure(table, list_fields=["id", "name", "description"])
##################
# Group Membership
##################
delphi_role_opts = {
1:T("Guest"),
2:T("Contributor"),
3:T("Participant"),
4:T("Moderator")
}
resourcename = "user_to_group"
tablename = module + "_" + resourcename
table = db.define_table(tablename,
Field("group_id", db.delphi_group, notnull=True),
Field("user_id", db.auth_user, notnull=True),
Field("description"),
Field("req", "boolean", default=False),
Field("status", "integer", default=1),
migrate=migrate)
table.group_id.label = T("Problem Group")
table.group_id.requires = IS_IN_DB(db, "delphi_group.id", "%(name)s")
table.group_id.represent = lambda id: (id and [db(db.delphi_group.id == id).select(limitby=(0, 1)).first().name] or ["None"])[0]
table.user_id.label = T("User")
table.user_id.represent = lambda user_id: (user_id == 0) and "-" or "%(first_name)s %(last_name)s [%(id)d]" % db(db.auth_user.id==user_id).select()[0]
#table.user_id.requires = IS_IN_DB(db, "auth_user.id", "%(first_name)s %(last_name)s [%(id)d]")
table.user_id.requires = IS_IN_DB(db, "auth_user.id", shn_user_represent)
table.status.requires = IS_IN_SET(delphi_role_opts, zero=None)
table.status.represent = lambda opt: delphi_role_opts.get(opt, UNKNOWN_OPT)
# CRUD Strings
ADD_MEMBERSHIP = T("Add Membership")
LIST_MEMBERSHIPS = T("List Memberships")
s3.crud_strings[tablename] = Storage(
title_create = ADD_MEMBERSHIP,
title_display = T("Membership Details"),
title_list = LIST_MEMBERSHIPS,
title_update = T("Edit Membership"),
title_search = T("Search Memberships"),
subtitle_create = T("Add New Membership"),
subtitle_list = T("Memberships"),
label_list_button = LIST_MEMBERSHIPS,
label_create_button = ADD_MEMBERSHIP,
msg_record_created = T("Membership added"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Membership deleted"),
msg_list_empty = T("No Memberships currently defined"))
s3xrc.model.configure(table, list_fields=["id", "group_id", "user_id", "status", "req"])
##########
# Problems
##########
resourcename = "problem"
tablename = module + "_" + resourcename
table = db.define_table(tablename,
Field("group_id", db.delphi_group, notnull=True),
Field("name", notnull=True),
Field("description", "text"),
Field("criteria", "text", notnull=True),
Field("active", "boolean", default=True),
Field("created_by", db.auth_user, writable=False, readable=False),
Field("last_modification", "datetime", default=request.now, writable=False),
migrate=migrate)
table.name.label = T("Problem Title")
table.name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, "delphi_problem.name")]
table.created_by.default = auth.user.id if auth.user else 0
table.group_id.label = T("Problem Group")
table.group_id.requires = IS_IN_DB(db, "delphi_group.id", "%(name)s")
table.group_id.represent = lambda id: (id and [db(db.delphi_group.id == id).select(limitby=(0, 1)).first().name] or ["None"])[0]
# CRUD Strings
ADD_PROBLEM = T("Add Problem")
LIST_PROBLEMS = T("List Problems")
s3.crud_strings[tablename] = Storage(
title_create = ADD_PROBLEM,
title_display = T("Problem Details"),
title_list = LIST_PROBLEMS,
title_update = T("Edit Problem"),
title_search = T("Search Problems"),
subtitle_create = T("Add New Problem"),
subtitle_list = T("Problems"),
label_list_button = LIST_PROBLEMS,
label_create_button = ADD_PROBLEM,
msg_record_created = T("Problem added"),
msg_record_modified = T("Problem updated"),
msg_record_deleted = T("Problem deleted"),
msg_list_empty = T("No Problems currently defined"))
s3xrc.model.configure(table, list_fields=["id", "group_id", "name", "created_by", "last_modification"])
def get_last_problem_id():
last_problems = db(db.delphi_problem.id > 0).select(db.delphi_problem.id, orderby =~ db.delphi_problem.id, limitby = (0, 1))
if last_problems:
return last_problems[0].id
###########
# Solutions
###########
resourcename = "solution"
tablename = module + "_" + resourcename
table = db.define_table(tablename,
Field("problem_id", db.delphi_problem, notnull=True),
Field("name"),
Field("description", "text"),
Field("suggested_by", db.auth_user, writable=False, readable=False),
Field("last_modification", "datetime", default=request.now, writable=False),
migrate=migrate)
table.name.requires = IS_NOT_EMPTY()
table.name.label = T("Title")
table.suggested_by.default = auth.user.id if auth.user else 0
table.problem_id.label = T("Problem")
# Breaks on 1st_run with prepopulate=False, so moved to controller
#table.problem_id.default = get_last_problem_id()
table.problem_id.requires = IS_IN_DB(db, "delphi_problem.id", "%(id)s: %(name)s")
table.problem_id.represent = lambda id: (id and [db(db.delphi_problem.id == id).select(limitby=(0, 1)).first().name] or ["None"])[0]
# CRUD Strings
ADD_SOLUTION = T("Add Solution")
LIST_SOLUTIONS = T("List Solutions")
s3.crud_strings[tablename] = Storage(
title_create = ADD_SOLUTION,
title_display = T("Solution Details"),
title_list = LIST_SOLUTIONS,
title_update = T("Edit Solution"),
title_search = T("Search Solutions"),
subtitle_create = T("Add New Solution"),
subtitle_list = T("Solutions"),
label_list_button = LIST_SOLUTIONS,
label_create_button = ADD_SOLUTION,
msg_record_created = T("Solution added"),
msg_record_modified = T("Solution updated"),
msg_record_deleted = T("Solution deleted"),
msg_list_empty = T("No Solutions currently defined"))
s3xrc.model.configure(table, list_fields=["id", "problem_id", "name", "suggested_by", "last_modification"])
#######
# Votes
#######
resourcename = "vote"
tablename = module + "_" + resourcename
table = db.define_table(tablename,
Field("problem_id", db.delphi_problem, notnull=True),
Field("solution_id", db.delphi_solution, notnull=True),
Field("rank", "integer"),
Field("user_id", db.auth_user, writable=False, readable=False),
Field("last_modification", "datetime", default=request.now, writable=False),
migrate=migrate)
table.problem_id.label = T("Problem")
table.solution_id.label = T("Solution")
table.user_id.label = T("User")
table.user_id.default = auth.user.id if auth.user else 0
#############
# Forum Posts
#############
resourcename = "forum_post"
tablename = module + "_" + resourcename
table = db.define_table(tablename,
Field("solution_id", db.delphi_solution, notnull=True),
Field("title"),
Field("post", "text", notnull=True),
Field("post_html", "text", default=""),
Field("user_id", db.auth_user, writable=False, readable=False),
Field("last_modification", "datetime", default=request.now, writable=False),
migrate=migrate)
table.solution_id.label = T("Solution")
table.user_id.label = T("User")
table.user_id.default = auth.user.id if auth.user else 0
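# Illustrative sketch (not part of the original model): the kind of query a
# controller might run against the tables defined above. The group id is a
# placeholder value.
def _example_active_problems(group_id=1):
    return db((db.delphi_problem.group_id == group_id) &
              (db.delphi_problem.active == True)).select(
                  orderby=~db.delphi_problem.last_modification)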
| ksetyadi/Sahana-Eden | models/delphi.py | Python | mit | 9,575 | 0.014413 |
# -*- coding: utf-8 -*-
import logging
from wakatime.compat import u
try:
import mock
except ImportError:
import unittest.mock as mock
try:
# Python 2.6
import unittest2 as unittest
except ImportError:
# Python >= 2.7
import unittest
class TestCase(unittest.TestCase):
patch_these = []
def setUp(self):
# disable logging while testing
logging.disable(logging.CRITICAL)
self.patched = {}
if hasattr(self, 'patch_these'):
for patch_this in self.patch_these:
namespace = patch_this[0] if isinstance(patch_this, (list, set)) else patch_this
patcher = mock.patch(namespace)
mocked = patcher.start()
mocked.reset_mock()
self.patched[namespace] = mocked
if isinstance(patch_this, (list, set)) and len(patch_this) > 0:
retval = patch_this[1]
if callable(retval):
retval = retval()
mocked.return_value = retval
def tearDown(self):
mock.patch.stopall()
def normalize_list(self, items):
return sorted([u(x) for x in items])
def assertListsEqual(self, first_list, second_list):
self.assertEquals(self.normalize_list(first_list), self.normalize_list(second_list))
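# Illustrative sketch (not part of the original file): how a test module might
# use the patch_these hook above. The dotted paths are hypothetical
# placeholders, not real wakatime APIs.
class ExamplePatchingTest(TestCase):
    patch_these = [
        'mymodule.network.send',        # plain namespace -> default MagicMock
        ['mymodule.config.load', {}],   # namespace plus a fixed return value
    ]

    def test_send_not_called(self):
        mocked_send = self.patched['mymodule.network.send']
        mocked_send.assert_not_called()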
| Djabbz/wakatime | tests/utils.py | Python | bsd-3-clause | 1,355 | 0.002214 |
# CIS 410/510pm
# Homework 5 beta 0.0.1
# Cameron Palk
# May 2016
#
# Special thanks to Daniel Lowd for the skeletor code
import sys
import tokenize
from functools import reduce
global_card = []
num_vars = 0
''' Calc Strides
'''
def calcStrides( scope ):
rev_scope = list( reversed( scope ) )
res = [ 1 ] + [ 0 ] * ( len( scope ) - 1 )
for idx in range( 1, len( rev_scope ) ):
res[ idx ] = res[ idx - 1 ] * global_card[ rev_scope[ idx - 1 ] ]
stride = list( reversed( res ) )
return { scope[i] : stride[i] for i in range( len( scope ) ) }
# FACTOR CLASS DEFINITION
class Factor( dict ):
# Constructor
def __init__(self, scope_, vals_):
self.scope = scope_
self.vals = vals_
self.stride = calcStrides( scope_ )
#
# Are two object EQual, True of False
def __eq__(self, other):
return (self.scope == other.scope and
self.vals == other.vals and
self.stride == other.stride )
#
# A string used for printing the Factor Objects
def __repr__( self ):
style = "\n{0}\nScope: {1}\nStride: {2}\nCard: {3}\nVals:\n{4}\n{0}\n"
vertBar = ''.join( ['-'] * 50 )
return style.format( vertBar, self.scope, self.stride,
{ v : global_card[v] for v in self.scope },
'\n'.join( [ str( round( e, 3 ) ) for e in self.vals ] ) )
#
# What the '*' character does between our objects
def __mul__( self, other ):
new_scope = list( set( self.scope ).union( set( other.scope ) ) )
assignment = { e : 0 for e in new_scope }
card = { u : global_card[ u ] for u in new_scope }
val_count = reduce( lambda agg, x: agg * global_card[x], new_scope, 1 )
new_vals = [ 0 ] * val_count
idx1 = idx2 = 0
for i in range( 0, val_count ):
new_vals[ i ] = self.vals[ idx1 ] * other.vals[ idx2 ]
for rv in reversed( new_scope ):
if assignment[ rv ] == card[ rv ] - 1:
idx1 -= assignment[ rv ] * self.stride [ rv ] if rv in self.stride else 0
idx2 -= assignment[ rv ] * other.stride[ rv ] if rv in other.stride else 0
assignment[ rv ] = 0
else:
idx1 += self.stride [ rv ] if rv in self.scope else 0
idx2 += other.stride[ rv ] if rv in other.scope else 0
assignment[ rv ] += 1
break
#
return Factor( new_scope, new_vals )
#
# Sum out the variable and return a new Factor
def sumOut( self ):
# TODO Sum out a RV
return
#
# Helper Functions:
def containsRV( self, rv ):
return rv in self.scope
#
# END FACTOR CLASS DEFINITION
# IGNORE DANIELS READER BELOW
#
# Read in all tokens from stdin. Save it to a (global) buf that we use
# later. (Is there a better way to do this? Almost certainly.)
curr_token = 0
token_buf = []
def read_tokens():
global token_buf
for line in sys.stdin:
token_buf.extend(line.strip().split())
#
def next_token():
global curr_token
global token_buf
curr_token += 1
return token_buf[ curr_token - 1 ]
#
def next_int():
return int( next_token() )
#
def next_float():
return float( next_token() )
#
def read_model():
# Read in all tokens and throw away the first (expected to be "MARKOV")
read_tokens()
s = next_token()
# Get number of vars, followed by their ranges
global num_vars
num_vars = next_int()
global global_card
global_card = [ next_int() for i in range( num_vars ) ]
# Get number and scopes of factors
num_factors = int(next_token())
factor_scopes = []
for i in range(num_factors):
factor_scopes.append( [ next_int() for i in range( next_int() ) ] )
# Read in all factor values
factor_vals = []
for i in range(num_factors):
factor_vals.append( [ next_float() for i in range( next_int() ) ] )
return [ Factor(s,v) for (s,v) in zip( factor_scopes, factor_vals ) ]
#
# IGNORE DANIELS READER ABOVE
''' Factor Count With Var
@input factors Factors we want to look through
	@input	rv	An RV
	@return	[int]	The number of times the rv occurs in the factors' scopes
'''
def factorCountWithVar( factors, rv ):
return sum( [ 1 if f.containsRV( rv ) else 0 for f in factors ] )
''' Factor Stats
'''
def factorStats( factors, possibleVariables ):
return { v: factorCountWithVar(factors,v) for v in range( num_vars ) if v in possibleVariables }
''' Compute Partition Function
@input factors An array of Factor objects representing the graph
@return [float] The partition function ( why is it called a function? )
'''
def computePartitionFunction( factors ):
	# TODO: Implement a faster way to compute the partition function by summing out variables
f = reduce( Factor.__mul__, factors )
z = sum( f.vals )
return z
#
''' Main '''
def main():
# Read file
factors = read_model()
	# Compute the partition function
z = computePartitionFunction( factors )
# Print results
print( "Z =", z )
return
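# Illustrative sketch (not part of the assignment skeleton): a tiny worked
# example of the factor product over two made-up binary variables.
def _example_factor_product():
    global global_card
    global_card = [2, 2]
    f1 = Factor([0], [0.6, 0.4])                # table over variable 0
    f2 = Factor([0, 1], [0.5, 0.5, 0.1, 0.9])   # table over variables 0 and 1
    f3 = f1 * f2                                # joint table with 4 entries
    # Summing every entry gives the partition function:
    # 0.6*0.5 + 0.6*0.5 + 0.4*0.1 + 0.4*0.9 = 1.0
    return sum(f3.vals)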
# Run main if this module is being run directly
if __name__ == '__main__':
main()
| CKPalk/ProbabilisticMethods | A5/hw5_start.py | Python | mit | 4,767 | 0.064821 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import cv2
import torch
from torchvision import transforms as T
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.utils import cv2_util
class COCODemo(object):
# COCO categories for pretty print
CATEGORIES = [
"__background",
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"dining table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
]
def __init__(
self,
cfg,
confidence_threshold=0.7,
show_mask_heatmaps=False,
masks_per_dim=2,
min_image_size=224,
):
self.cfg = cfg.clone()
self.model = build_detection_model(cfg)
self.model.eval()
self.device = torch.device(cfg.MODEL.DEVICE)
self.model.to(self.device)
self.min_image_size = min_image_size
save_dir = cfg.OUTPUT_DIR
checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
self.transforms = self.build_transform()
mask_threshold = -1 if show_mask_heatmaps else 0.5
self.masker = Masker(threshold=mask_threshold, padding=1)
# used to make colors for each class
self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
self.cpu_device = torch.device("cpu")
self.confidence_threshold = confidence_threshold
self.show_mask_heatmaps = show_mask_heatmaps
self.masks_per_dim = masks_per_dim
def build_transform(self):
"""
Creates a basic transformation that was used to train the models
"""
cfg = self.cfg
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
to_bgr_transform = T.Lambda(lambda x: x * 255)
else:
to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
normalize_transform = T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
)
transform = T.Compose(
[
T.ToPILImage(),
T.Resize(self.min_image_size),
T.ToTensor(),
to_bgr_transform,
normalize_transform,
]
)
return transform
def run_on_opencv_image(self, image):
"""
Arguments:
image (np.ndarray): an image as returned by OpenCV
        Returns:
            result (np.ndarray): a copy of the input image with the detected
            objects (boxes, masks, keypoints and class names) drawn on top.
"""
predictions = self.compute_prediction(image)
top_predictions = self.select_top_predictions(predictions)
result = image.copy()
if self.show_mask_heatmaps:
return self.create_mask_montage(result, top_predictions)
result = self.overlay_boxes(result, top_predictions)
if self.cfg.MODEL.MASK_ON:
result = self.overlay_mask(result, top_predictions)
if self.cfg.MODEL.KEYPOINT_ON:
result = self.overlay_keypoints(result, top_predictions)
result = self.overlay_class_names(result, top_predictions)
return result
def compute_prediction(self, original_image):
"""
Arguments:
original_image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
# apply pre-processing to image
image = self.transforms(original_image)
# convert to an ImageList, padded so that it is divisible by
# cfg.DATALOADER.SIZE_DIVISIBILITY
image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
image_list = image_list.to(self.device)
# compute predictions
with torch.no_grad():
predictions = self.model(image_list)
predictions = [o.to(self.cpu_device) for o in predictions]
# always single image is passed at a time
prediction = predictions[0]
# reshape prediction (a BoxList) into the original image size
height, width = original_image.shape[:-1]
prediction = prediction.resize((width, height))
if prediction.has_field("mask"):
# if we have masks, paste the masks in the right position
# in the image, as defined by the bounding boxes
masks = prediction.get_field("mask")
# always single image is passed at a time
masks = self.masker([masks], [prediction])[0]
prediction.add_field("mask", masks)
return prediction
def select_top_predictions(self, predictions):
"""
Select only predictions which have a `score` > self.confidence_threshold,
and returns the predictions in descending order of score
Arguments:
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores`.
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
scores = predictions.get_field("scores")
keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)
predictions = predictions[keep]
scores = predictions.get_field("scores")
_, idx = scores.sort(0, descending=True)
return predictions[idx]
def compute_colors_for_labels(self, labels):
"""
Simple function that adds fixed colors depending on the class
"""
colors = labels[:, None] * self.palette
colors = (colors % 255).numpy().astype("uint8")
return colors
def overlay_boxes(self, image, predictions):
"""
Adds the predicted boxes on top of the image
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `labels`.
"""
labels = predictions.get_field("labels")
boxes = predictions.bbox
colors = self.compute_colors_for_labels(labels).tolist()
for box, color in zip(boxes, colors):
box = box.to(torch.int64)
top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
image = cv2.rectangle(
image, tuple(top_left), tuple(bottom_right), tuple(color), 1
)
return image
def overlay_mask(self, image, predictions):
"""
        Adds the instance contours for each predicted object.
Each label has a different color.
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `mask` and `labels`.
"""
masks = predictions.get_field("mask").numpy()
labels = predictions.get_field("labels")
colors = self.compute_colors_for_labels(labels).tolist()
for mask, color in zip(masks, colors):
thresh = mask[0, :, :, None]
contours, hierarchy = cv2_util.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
image = cv2.drawContours(image, contours, -1, color, 3)
composite = image
return composite
def overlay_keypoints(self, image, predictions):
keypoints = predictions.get_field("keypoints")
kps = keypoints.keypoints
scores = keypoints.get_field("logits")
kps = torch.cat((kps[:, :, 0:2], scores[:, :, None]), dim=2).numpy()
for region in kps:
image = vis_keypoints(image, region.transpose((1, 0)))
return image
def create_mask_montage(self, image, predictions):
"""
        Create a montage showing the probability heatmaps for each one of the
detected objects
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `mask`.
"""
masks = predictions.get_field("mask")
masks_per_dim = self.masks_per_dim
masks = L.interpolate(
masks.float(), scale_factor=1 / masks_per_dim
).byte()
height, width = masks.shape[-2:]
max_masks = masks_per_dim ** 2
masks = masks[:max_masks]
# handle case where we have less detections than max_masks
if len(masks) < max_masks:
masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)
masks_padded[: len(masks)] = masks
masks = masks_padded
masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)
result = torch.zeros(
(masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8
)
for y in range(masks_per_dim):
start_y = y * height
end_y = (y + 1) * height
for x in range(masks_per_dim):
start_x = x * width
end_x = (x + 1) * width
result[start_y:end_y, start_x:end_x] = masks[y, x]
return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)
def overlay_class_names(self, image, predictions):
"""
Adds detected class names and scores in the positions defined by the
top-left corner of the predicted bounding box
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores` and `labels`.
"""
scores = predictions.get_field("scores").tolist()
labels = predictions.get_field("labels").tolist()
labels = [self.CATEGORIES[i] for i in labels]
boxes = predictions.bbox
template = "{}: {:.2f}"
for box, score, label in zip(boxes, scores, labels):
x, y = box[:2]
s = template.format(label, score)
cv2.putText(
image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1
)
return image
import numpy as np
import matplotlib.pyplot as plt
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
"""Visualizes keypoints (adapted from vis_one_image).
kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
"""
dataset_keypoints = PersonKeypoints.NAMES
kp_lines = PersonKeypoints.CONNECTIONS
# Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]
# Perform the drawing on a copy of the image, to allow for blending.
kp_mask = np.copy(img)
# Draw mid shoulder / mid hip first for better visualization.
mid_shoulder = (
kps[:2, dataset_keypoints.index('right_shoulder')] +
kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
sc_mid_shoulder = np.minimum(
kps[2, dataset_keypoints.index('right_shoulder')],
kps[2, dataset_keypoints.index('left_shoulder')])
mid_hip = (
kps[:2, dataset_keypoints.index('right_hip')] +
kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
sc_mid_hip = np.minimum(
kps[2, dataset_keypoints.index('right_hip')],
kps[2, dataset_keypoints.index('left_hip')])
nose_idx = dataset_keypoints.index('nose')
if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
cv2.line(
kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),
color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)
if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
cv2.line(
kp_mask, tuple(mid_shoulder), tuple(mid_hip),
color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)
# Draw the keypoints.
for l in range(len(kp_lines)):
i1 = kp_lines[l][0]
i2 = kp_lines[l][1]
p1 = kps[0, i1], kps[1, i1]
p2 = kps[0, i2], kps[1, i2]
if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
cv2.line(
kp_mask, p1, p2,
color=colors[l], thickness=2, lineType=cv2.LINE_AA)
if kps[2, i1] > kp_thresh:
cv2.circle(
kp_mask, p1,
radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
if kps[2, i2] > kp_thresh:
cv2.circle(
kp_mask, p2,
radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
# Blend the keypoints.
return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)
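# Illustrative usage sketch (not part of the original file). The config and
# image paths are placeholders; any Mask R-CNN config from this repository
# should work the same way.
def _example_run(config_file="configs/e2e_mask_rcnn_R_50_FPN_1x.yaml",
                 image_file="demo.jpg"):
    from maskrcnn_benchmark.config import cfg
    cfg.merge_from_file(config_file)              # model architecture + weights path
    cfg.merge_from_list(["MODEL.DEVICE", "cpu"])  # run on CPU for the demo
    coco_demo = COCODemo(cfg, confidence_threshold=0.7, min_image_size=800)
    image = cv2.imread(image_file)                # BGR uint8 array, as OpenCV returns
    composite = coco_demo.run_on_opencv_image(image)
    cv2.imwrite("predictions.jpg", composite)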
| mlperf/training_results_v0.7 | NVIDIA/benchmarks/maskrcnn/implementations/pytorch/demo/predictor.py | Python | apache-2.0 | 15,180 | 0.000791 |
from ...utils.code_utils import deprecate_module
deprecate_module("EMUtils", "waveform_utils", "0.16.0", error=True)
from .waveform_utils import *
| simpeg/simpeg | SimPEG/electromagnetics/utils/EMUtils.py | Python | mit | 149 | 0.006711 |
class Breakpoint():
def __init__(self, breakpointNumber):
self.breakpointNumber = breakpointNumber
class BreakpointPPUByTime(Breakpoint):
def __init__(self, breakpointNumber, scanline, tick):
Breakpoint.__init__(self, breakpointNumber)
self._scanline = scanline
self._tick = tick
def toString(self):
return 'Scanline = {self._scanline:s}, Tick = {self._tick:s}'.format(**locals())
class BreakpointPPUByAddress(Breakpoint):
def __init__(self, breakpointNumber, address):
Breakpoint.__init__(self, breakpointNumber)
self._address = address
def toString(self):
return 'Address = {self._address:s}'.format(**locals())
class BreakpointPPUByValue(Breakpoint):
def __init__(self, breakpointNumber, address, value):
Breakpoint.__init__(self, breakpointNumber)
self._address = address
self._value = value
def toString(self):
return 'Address = {self._address:s}, Value = {self._value:s}'.format(**locals()) | aLaix2/O-Nes-Sama | DebuggerClient/Breakpoint.py | Python | gpl-3.0 | 1,060 | 0.00283 |
#!/usr/bin/env python
from flask import Flask, jsonify, request, abort, render_template
app = Flask(__name__)
@app.route("/",methods=['GET'])
def index():
if request.method == 'GET':
return render_template('index.html')
else:
abort(400)
@app.route("/devices",methods=['GET'])
def devices():
if request.method == 'GET':
return render_template('devices.html')
else:
abort(400)
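# Illustrative sketch (not part of the original file): exercising the two routes
# with Flask's test client, assuming templates/index.html and
# templates/devices.html exist alongside this app.
def _example_requests():
    client = app.test_client()
    assert client.get('/').status_code == 200          # renders index.html
    assert client.get('/devices').status_code == 200   # renders devices.html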
if __name__ == "__main__":
app.debug = True
app.run(host='0.0.0.0')
| mattiasgiese/squeezie | app/master.py | Python | mit | 476 | 0.021008 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import sys
import array as pyarray
import unittest
import numpy as np
from pyspark import keyword_only
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import Binarizer, Bucketizer, ElementwiseProduct, IndexToString, \
MaxAbsScaler, VectorSlicer, Word2Vec
from pyspark.ml.linalg import DenseVector, SparseVector, Vectors
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasInputCol, HasMaxIter, HasSeed
from pyspark.ml.wrapper import JavaParams
from pyspark.testing.mlutils import check_params, PySparkTestCase, SparkSessionTestCase
if sys.version > '3':
xrange = range
class ParamTypeConversionTests(PySparkTestCase):
"""
Test that param type conversion happens.
"""
def test_int(self):
lr = LogisticRegression(maxIter=5.0)
self.assertEqual(lr.getMaxIter(), 5)
self.assertTrue(type(lr.getMaxIter()) == int)
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter="notAnInt"))
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter=5.1))
def test_float(self):
lr = LogisticRegression(tol=1)
self.assertEqual(lr.getTol(), 1.0)
self.assertTrue(type(lr.getTol()) == float)
self.assertRaises(TypeError, lambda: LogisticRegression(tol="notAFloat"))
def test_vector(self):
ewp = ElementwiseProduct(scalingVec=[1, 3])
self.assertEqual(ewp.getScalingVec(), DenseVector([1.0, 3.0]))
ewp = ElementwiseProduct(scalingVec=np.array([1.2, 3.4]))
self.assertEqual(ewp.getScalingVec(), DenseVector([1.2, 3.4]))
self.assertRaises(TypeError, lambda: ElementwiseProduct(scalingVec=["a", "b"]))
def test_list(self):
l = [0, 1]
for lst_like in [l, np.array(l), DenseVector(l), SparseVector(len(l), range(len(l)), l),
pyarray.array('l', l), xrange(2), tuple(l)]:
converted = TypeConverters.toList(lst_like)
self.assertEqual(type(converted), list)
self.assertListEqual(converted, l)
def test_list_int(self):
for indices in [[1.0, 2.0], np.array([1.0, 2.0]), DenseVector([1.0, 2.0]),
SparseVector(2, {0: 1.0, 1: 2.0}), xrange(1, 3), (1.0, 2.0),
pyarray.array('d', [1.0, 2.0])]:
vs = VectorSlicer(indices=indices)
self.assertListEqual(vs.getIndices(), [1, 2])
self.assertTrue(all([type(v) == int for v in vs.getIndices()]))
self.assertRaises(TypeError, lambda: VectorSlicer(indices=["a", "b"]))
def test_list_float(self):
b = Bucketizer(splits=[1, 4])
self.assertEqual(b.getSplits(), [1.0, 4.0])
self.assertTrue(all([type(v) == float for v in b.getSplits()]))
self.assertRaises(TypeError, lambda: Bucketizer(splits=["a", 1.0]))
def test_list_list_float(self):
b = Bucketizer(splitsArray=[[-0.1, 0.5, 3], [-5, 1.5]])
self.assertEqual(b.getSplitsArray(), [[-0.1, 0.5, 3.0], [-5.0, 1.5]])
self.assertTrue(all([type(v) == list for v in b.getSplitsArray()]))
self.assertTrue(all([type(v) == float for v in b.getSplitsArray()[0]]))
self.assertTrue(all([type(v) == float for v in b.getSplitsArray()[1]]))
self.assertRaises(TypeError, lambda: Bucketizer(splitsArray=["a", 1.0]))
self.assertRaises(TypeError, lambda: Bucketizer(splitsArray=[[-5, 1.5], ["a", 1.0]]))
def test_list_string(self):
for labels in [np.array(['a', u'b']), ['a', u'b'], np.array(['a', 'b'])]:
idx_to_string = IndexToString(labels=labels)
self.assertListEqual(idx_to_string.getLabels(), ['a', 'b'])
self.assertRaises(TypeError, lambda: IndexToString(labels=['a', 2]))
def test_string(self):
lr = LogisticRegression()
for col in ['features', u'features', np.str_('features')]:
lr.setFeaturesCol(col)
self.assertEqual(lr.getFeaturesCol(), 'features')
self.assertRaises(TypeError, lambda: LogisticRegression(featuresCol=2.3))
def test_bool(self):
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept=1))
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept="false"))
class TestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(TestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
class OtherTestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(OtherTestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
class HasThrowableProperty(Params):
def __init__(self):
super(HasThrowableProperty, self).__init__()
self.p = Param(self, "none", "empty param")
@property
def test_property(self):
raise RuntimeError("Test property to raise error when invoked")
class ParamTests(SparkSessionTestCase):
def test_copy_new_parent(self):
testParams = TestParams()
# Copying an instantiated param should fail
with self.assertRaises(ValueError):
testParams.maxIter._copy_new_parent(testParams)
# Copying a dummy param should succeed
TestParams.maxIter._copy_new_parent(testParams)
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_param(self):
testParams = TestParams()
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_hasparam(self):
testParams = TestParams()
self.assertTrue(all([testParams.hasParam(p.name) for p in testParams.params]))
self.assertFalse(testParams.hasParam("notAParameter"))
self.assertTrue(testParams.hasParam(u"maxIter"))
def test_resolveparam(self):
testParams = TestParams()
self.assertEqual(testParams._resolveParam(testParams.maxIter), testParams.maxIter)
self.assertEqual(testParams._resolveParam("maxIter"), testParams.maxIter)
self.assertEqual(testParams._resolveParam(u"maxIter"), testParams.maxIter)
if sys.version_info[0] >= 3:
# In Python 3, it is allowed to get/set attributes with non-ascii characters.
e_cls = AttributeError
else:
e_cls = UnicodeEncodeError
self.assertRaises(e_cls, lambda: testParams._resolveParam(u"아"))
def test_params(self):
testParams = TestParams()
maxIter = testParams.maxIter
inputCol = testParams.inputCol
seed = testParams.seed
params = testParams.params
self.assertEqual(params, [inputCol, maxIter, seed])
self.assertTrue(testParams.hasParam(maxIter.name))
self.assertTrue(testParams.hasDefault(maxIter))
self.assertFalse(testParams.isSet(maxIter))
self.assertTrue(testParams.isDefined(maxIter))
self.assertEqual(testParams.getMaxIter(), 10)
self.assertTrue(testParams.hasParam(inputCol.name))
self.assertFalse(testParams.hasDefault(inputCol))
self.assertFalse(testParams.isSet(inputCol))
self.assertFalse(testParams.isDefined(inputCol))
with self.assertRaises(KeyError):
testParams.getInputCol()
otherParam = Param(Params._dummy(), "otherParam", "Parameter used to test that " +
"set raises an error for a non-member parameter.",
typeConverter=TypeConverters.toString)
with self.assertRaises(ValueError):
testParams.set(otherParam, "value")
# Since the default is normally random, set it to a known number for debug str
testParams._setDefault(seed=41)
self.assertEqual(
testParams.explainParams(),
"\n".join(["inputCol: input column name. (undefined)",
"maxIter: max number of iterations (>= 0). (default: 10)",
"seed: random seed. (default: 41)"]))
def test_clear_param(self):
df = self.spark.createDataFrame([(Vectors.dense([1.0]),), (Vectors.dense([2.0]),)], ["a"])
maScaler = MaxAbsScaler(inputCol="a", outputCol="scaled")
model = maScaler.fit(df)
self.assertTrue(model.isSet(model.outputCol))
self.assertEqual(model.getOutputCol(), "scaled")
model.clear(model.outputCol)
self.assertFalse(model.isSet(model.outputCol))
self.assertEqual(model.getOutputCol()[:12], 'MaxAbsScaler')
output = model.transform(df)
self.assertEqual(model.getOutputCol(), output.schema.names[1])
def test_kmeans_param(self):
algo = KMeans()
self.assertEqual(algo.getInitMode(), "k-means||")
algo.setK(10)
self.assertEqual(algo.getK(), 10)
algo.setInitSteps(10)
self.assertEqual(algo.getInitSteps(), 10)
self.assertEqual(algo.getDistanceMeasure(), "euclidean")
algo.setDistanceMeasure("cosine")
self.assertEqual(algo.getDistanceMeasure(), "cosine")
def test_hasseed(self):
noSeedSpecd = TestParams()
withSeedSpecd = TestParams(seed=42)
other = OtherTestParams()
# Check that we no longer use 42 as the magic number
self.assertNotEqual(noSeedSpecd.getSeed(), 42)
origSeed = noSeedSpecd.getSeed()
# Check that we only compute the seed once
self.assertEqual(noSeedSpecd.getSeed(), origSeed)
# Check that a specified seed is honored
self.assertEqual(withSeedSpecd.getSeed(), 42)
# Check that a different class has a different seed
self.assertNotEqual(other.getSeed(), noSeedSpecd.getSeed())
def test_param_property_error(self):
param_store = HasThrowableProperty()
self.assertRaises(RuntimeError, lambda: param_store.test_property)
params = param_store.params # should not invoke the property 'test_property'
self.assertEqual(len(params), 1)
def test_word2vec_param(self):
model = Word2Vec().setWindowSize(6)
# Check windowSize is set properly
self.assertEqual(model.getWindowSize(), 6)
def test_copy_param_extras(self):
tp = TestParams(seed=42)
extra = {tp.getParam(TestParams.inputCol.name): "copy_input"}
tp_copy = tp.copy(extra=extra)
self.assertEqual(tp.uid, tp_copy.uid)
self.assertEqual(tp.params, tp_copy.params)
for k, v in extra.items():
self.assertTrue(tp_copy.isDefined(k))
self.assertEqual(tp_copy.getOrDefault(k), v)
copied_no_extra = {}
for k, v in tp_copy._paramMap.items():
if k not in extra:
copied_no_extra[k] = v
self.assertEqual(tp._paramMap, copied_no_extra)
self.assertEqual(tp._defaultParamMap, tp_copy._defaultParamMap)
with self.assertRaises(TypeError):
tp.copy(extra={"unknown_parameter": None})
with self.assertRaises(TypeError):
tp.copy(extra=["must be a dict"])
def test_logistic_regression_check_thresholds(self):
self.assertIsInstance(
LogisticRegression(threshold=0.5, thresholds=[0.5, 0.5]),
LogisticRegression
)
self.assertRaisesRegexp(
ValueError,
"Logistic Regression getThreshold found inconsistent.*$",
LogisticRegression, threshold=0.42, thresholds=[0.5, 0.5]
)
def test_preserve_set_state(self):
dataset = self.spark.createDataFrame([(0.5,)], ["data"])
binarizer = Binarizer(inputCol="data")
self.assertFalse(binarizer.isSet("threshold"))
binarizer.transform(dataset)
binarizer._transfer_params_from_java()
self.assertFalse(binarizer.isSet("threshold"),
"Params not explicitly set should remain unset after transform")
def test_default_params_transferred(self):
dataset = self.spark.createDataFrame([(0.5,)], ["data"])
binarizer = Binarizer(inputCol="data")
# intentionally change the pyspark default, but don't set it
binarizer._defaultParamMap[binarizer.outputCol] = "my_default"
result = binarizer.transform(dataset).select("my_default").collect()
self.assertFalse(binarizer.isSet(binarizer.outputCol))
self.assertEqual(result[0][0], 1.0)
class DefaultValuesTests(PySparkTestCase):
"""
Test :py:class:`JavaParams` classes to see if their default Param values match
those in their Scala counterparts.
"""
def test_java_params(self):
import re
import pyspark.ml.feature
import pyspark.ml.classification
import pyspark.ml.clustering
import pyspark.ml.evaluation
import pyspark.ml.pipeline
import pyspark.ml.recommendation
import pyspark.ml.regression
modules = [pyspark.ml.feature, pyspark.ml.classification, pyspark.ml.clustering,
pyspark.ml.evaluation, pyspark.ml.pipeline, pyspark.ml.recommendation,
pyspark.ml.regression]
for module in modules:
for name, cls in inspect.getmembers(module, inspect.isclass):
if not name.endswith('Model') and not name.endswith('Params') \
and issubclass(cls, JavaParams) and not inspect.isabstract(cls) \
and not re.match("_?Java", name) and name != '_LSH' \
and name != '_Selector':
# NOTE: disable check_params_exist until there is parity with Scala API
check_params(self, cls(), check_params_exist=False)
# Additional classes that need explicit construction
from pyspark.ml.feature import CountVectorizerModel, StringIndexerModel
check_params(self, CountVectorizerModel.from_vocabulary(['a'], 'input'),
check_params_exist=False)
check_params(self, StringIndexerModel.from_labels(['a', 'b'], 'input'),
check_params_exist=False)
if __name__ == "__main__":
from pyspark.ml.tests.test_param import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| ConeyLiu/spark | python/pyspark/ml/tests/test_param.py | Python | apache-2.0 | 16,252 | 0.002031 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration test for Python cross-language pipelines for Java KafkaIO."""
from __future__ import absolute_import
import contextlib
import logging
import os
import socket
import subprocess
import time
import typing
import unittest
import apache_beam as beam
from apache_beam.io.external.kafka import ReadFromKafka
from apache_beam.io.external.kafka import WriteToKafka
from apache_beam.metrics import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.testing.test_pipeline import TestPipeline
class CrossLanguageKafkaIO(object):
def __init__(self, bootstrap_servers, topic, expansion_service=None):
self.bootstrap_servers = bootstrap_servers
self.topic = topic
self.expansion_service = expansion_service
self.sum_counter = Metrics.counter('source', 'elements_sum')
def build_write_pipeline(self, pipeline):
_ = (
pipeline
| 'Impulse' >> beam.Impulse()
| 'Generate' >> beam.FlatMap(lambda x: range(1000)) # pylint: disable=range-builtin-not-iterating
| 'Reshuffle' >> beam.Reshuffle()
| 'MakeKV' >> beam.Map(lambda x:
(b'', str(x).encode())).with_output_types(
typing.Tuple[bytes, bytes])
| 'WriteToKafka' >> WriteToKafka(
producer_config={'bootstrap.servers': self.bootstrap_servers},
topic=self.topic,
expansion_service=self.expansion_service))
def build_read_pipeline(self, pipeline):
_ = (
pipeline
| 'ReadFromKafka' >> ReadFromKafka(
consumer_config={
'bootstrap.servers': self.bootstrap_servers,
'auto.offset.reset': 'earliest'
},
topics=[self.topic],
expansion_service=self.expansion_service)
| 'Windowing' >> beam.WindowInto(
beam.window.FixedWindows(300),
trigger=beam.transforms.trigger.AfterProcessingTime(60),
accumulation_mode=beam.transforms.trigger.AccumulationMode.
DISCARDING)
| 'DecodingValue' >> beam.Map(lambda elem: int(elem[1].decode()))
| 'CombineGlobally' >> beam.CombineGlobally(sum).without_defaults()
| 'SetSumCounter' >> beam.Map(self.sum_counter.inc))
def run_xlang_kafkaio(self, pipeline):
self.build_write_pipeline(pipeline)
self.build_read_pipeline(pipeline)
pipeline.run(False)
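# Illustrative sketch (not part of the original test): the helper class above can
# be driven by any portable runner outside of the unittest harness. The broker
# address, topic name and pipeline options below are assumptions for
# demonstration only.
#
#   options = PipelineOptions(['--runner', 'FlinkRunner', '--parallelism', '2'])
#   with beam.Pipeline(options=options) as p:
#       CrossLanguageKafkaIO('localhost:9092', 'demo_topic').build_write_pipeline(p)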
@unittest.skipUnless(
os.environ.get('LOCAL_KAFKA_JAR'),
"LOCAL_KAFKA_JAR environment var is not provided.")
class CrossLanguageKafkaIOTest(unittest.TestCase):
def get_open_port(self):
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except: # pylint: disable=bare-except
# Above call will fail for nodes that only support IPv6.
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
s.bind(('localhost', 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
@contextlib.contextmanager
def local_kafka_service(self, local_kafka_jar_file):
kafka_port = str(self.get_open_port())
zookeeper_port = str(self.get_open_port())
kafka_server = None
try:
kafka_server = subprocess.Popen(
['java', '-jar', local_kafka_jar_file, kafka_port, zookeeper_port])
time.sleep(3)
yield kafka_port
finally:
if kafka_server:
kafka_server.kill()
def get_options(self):
options = PipelineOptions([
'--runner',
'FlinkRunner',
'--parallelism',
'2',
'--experiment',
'beam_fn_api'
])
return options
def test_kafkaio_write(self):
local_kafka_jar = os.environ.get('LOCAL_KAFKA_JAR')
with self.local_kafka_service(local_kafka_jar) as kafka_port:
options = self.get_options()
p = TestPipeline(options=options)
p.not_use_test_runner_api = True
CrossLanguageKafkaIO('localhost:%s' % kafka_port,
'xlang_kafkaio_test').build_write_pipeline(p)
job = p.run()
job.wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| iemejia/incubator-beam | sdks/python/apache_beam/io/external/xlang_kafkaio_it_test.py | Python | apache-2.0 | 4,950 | 0.005253 |
from bibliopixel.animation.circle import Circle
from bibliopixel.colors import palettes
class Swirl(Circle):
COLOR_DEFAULTS = ('palette', palettes.get('three_sixty')),
def __init__(self, layout, angle=12, **kwds):
super().__init__(layout, **kwds)
self.angle = angle
def pre_run(self):
self._step = 0
def step(self, amt=1):
for a in range(0, 360, self.angle):
c = self.palette(self._step)
for i in range(self.ringCount):
self.layout.set(i, a, c)
self._step += amt
| ManiacalLabs/BiblioPixelAnimations | BiblioPixelAnimations/circle/swirl.py | Python | mit | 586 | 0 |
from task_manager.tag_manager.tag_manager import TaggingModel
from task_manager.models import Task
import numpy as np
import json
enabled_tagger_ids = [tagger.pk for tagger in Task.objects.filter(task_type='train_tagger').filter(status='completed')]
enabled_taggers = {}
# Load Tagger models
for _id in enabled_tagger_ids:
tm = TaggingModel()
tm.load(_id)
enabled_taggers[_id] = tm
class TextTaggerPreprocessor(object):
"""Preprocessor implementation for running TEXTA Text Taggers on the selected documents.
"""
def __init__(self, feature_map={}):
self._feature_map = feature_map
def transform(self, documents, **kwargs):
input_features = json.loads(kwargs['text_tagger_preprocessor_feature_names'])
tagger_ids_to_apply = [int(_id) for _id in json.loads(kwargs['text_tagger_preprocessor_taggers'])]
taggers_to_apply = []
if not kwargs.get('text_tagger_preprocessor_feature_names', None):
return documents
# Load tagger models
for _id in tagger_ids_to_apply:
tm = TaggingModel()
tm.load(_id)
taggers_to_apply.append(tm)
for input_feature in input_features:
texts = []
for document in documents:
# Take into account nested fields encoded as: 'field.sub_field'
decoded_text = document
for k in input_feature.split('.'):
# Field might be empty and not included in document
if k in decoded_text:
decoded_text = decoded_text[k]
else:
decoded_text = ''
break
            try:
                decoded_text = decoded_text.strip().decode()
            except AttributeError:
                decoded_text = decoded_text.strip()
texts.append(decoded_text)
if not texts:
return documents
## Dies with empty text!
results = []
tagger_descriptions = []
for tagger in taggers_to_apply:
tagger_descriptions.append(tagger.description)
result_vector = tagger.tag(texts)
results.append(result_vector)
results_transposed = np.array(results).transpose()
for i,tagger_ids in enumerate(results_transposed):
positive_tag_ids = np.nonzero(tagger_ids)
positive_tags = [tagger_descriptions[positive_tag_id] for positive_tag_id in positive_tag_ids[0]]
texta_facts = []
if positive_tags:
if 'texta_facts' not in documents[i]:
documents[i]['texta_facts'] = []
for tag in positive_tags:
new_fact = {'fact': 'TEXTA_TAG', 'str_val': tag, 'doc_path': input_feature, 'spans': json.dumps([[0,len(texts[i])]])}
texta_facts.append(new_fact)
documents[i]['texta_facts'].extend(texta_facts)
# Get total tagged documents, get np array of results
total_positives = np.count_nonzero(results)
return {"documents":documents, "meta": {'documents_tagged': total_positives}}
| cbentes/texta | dataset_importer/document_preprocessor/preprocessors/text_tagger.py | Python | gpl-3.0 | 3,426 | 0.007589 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Iterable and Iterator live in collections.abc; importing them from
# collections was removed in Python 3.10.
from collections.abc import Iterable
from collections.abc import Iterator
isinstance([], Iterable)
isinstance({}, Iterable)
isinstance((), Iterable)
isinstance('abc', Iterable)
isinstance((x for x in range(10)), Iterable)
isinstance(100, Iterable)
# Iterable but not Iterator
isinstance([], Iterator)
isinstance({}, Iterator)
isinstance((), Iterator)
isinstance('abc', Iterator)
isinstance((x for x in range(10)), Iterator)
isinstance(100, Iterator)
# use iter() to migrate iterable to iterator
# iterator is a data stream and donnot have a fixed length and is a lazy-calculated object
# if you could use 'for loop' , then it's a Iterable
# if you could use 'next()' , then it's a Iterator
for x in [1, 2, 3, 4, 5]:
pass
# Equals to
it = iter([1, 2, 3, 4, 5])
while True:
try:
x = next(it)
except StopIteration:
break
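# Additional illustrative sketch (not in the original file): a generator is an
# easy way to obtain an Iterator, because it implements both __iter__ and __next__.
def count_up_to(n):
    i = 1
    while i <= n:
        yield i
        i += 1
gen = count_up_to(3)
isinstance(gen, Iterable)  # True
isinstance(gen, Iterator)  # True
next(gen)  # 1
next(gen)  # 2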
| kmahyyg/learn_py3 | adv_feature/adv_feature_iterable.py | Python | agpl-3.0 | 894 | 0.001119 |
# Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import json
import requests
from django.conf import settings
def get_contact(school):
if not settings.ZOHO_CREDENTIALS:
return
list_url = 'https://invoice.zoho.com/api/v3/contacts?organization_id=' + settings.ORGANIZATION_ID + '&authtoken=' + settings.AUTHTOKEN
contact = {
"company_name_contains": school.name
}
return requests.get(list_url, params=contact).json()["contacts"][0]["contact_id"]
def generate_contact_attributes(school):
return {
"contact_name": school.primary_name,
"company_name": school.name,
"payment_terms": "",
"payment_terms_label": "Due on Receipt",
"currency_id": "",
"website": "",
"custom_fields": [
],
"billing_address": {
"address": "",
"city": "",
"state": "",
"zip": "",
"country": "",
"fax": ""
},
"shipping_address": {
"address": "",
"city": "",
"state": "",
"zip": "",
"country": "",
"fax": ""
},
"contact_persons": [{
"salutation": "",
"first_name": "",
"last_name": "",
"email": school.primary_email,
"phone": "",
"mobile": "",
"is_primary_contact": True
}],
"default_templates": {
"invoice_template_id": "",
"estimate_template_id": "",
"creditnote_template_id": "",
"invoice_email_template_id": "",
"estimate_email_template_id": "",
"creditnote_email_template_id": ""
},
"notes": ""
}
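# Illustrative usage sketch (not part of the original module): both helpers
# expect a school-like object exposing `name`, `primary_name` and
# `primary_email` attributes, as the Huxley School model does.
#
#   contact_id = get_contact(school)
#   payload = generate_contact_attributes(school)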
| ctmunwebmaster/huxley | huxley/utils/zoho.py | Python | bsd-3-clause | 1,767 | 0.01245 |
# coding: UTF-8
#
# Copyright 2014 by SCSK Corporation.
#
# This file is part of PrimeCloud Controller(TM).
#
# PrimeCloud Controller(TM) is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# PrimeCloud Controller(TM) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PrimeCloud Controller(TM). If not, see <http://www.gnu.org/licenses/>.
#
from iaasgw.exception.iaasException import IaasException
from iaasgw.log.log import IaasLogger
from iaasgw.module.ec2.ec2module import TagSet
from iaasgw.utils.stringUtils import isNotEmpty, isEmpty
import time
import traceback
class ec2VolumController(object):
logger = IaasLogger()
client = None
conn = None
platforminfo = None
def __init__(self, platforminfo, ec2iaasclient, conn):
self.client = ec2iaasclient
self.conn = conn
self.platforminfo = platforminfo
def startVolumes(self, instanceNo) :
        # Get the volume information
table = self.conn.getTable("AWS_VOLUME")
volumes = self.conn.select(table.select(table.c.INSTANCE_NO==instanceNo))
for volume in volumes :
self.startVolume(instanceNo, volume["VOLUME_NO"])
def startVolume(self, instanceNo, volumeNo) :
table = self.conn.getTable("AWS_VOLUME")
volume = self.conn.selectOne(table.select(table.c.VOLUME_NO==volumeNo))
        # Skip if the instance ID is already set
if (isNotEmpty(volume["INSTANCE_ID"])) :
return
if (isEmpty(volume["VOLUME_ID"])) :
            # Create a new volume if there is no volume ID
            self.createVolume(instanceNo, volumeNo)
        # Wait for the volume to be created
        self.waitCreateVolume(instanceNo, volumeNo)
        # Add tags to the volume
        self.createTag(volumeNo)
        # Attach the volume
        self.attachVolume(instanceNo, volumeNo)
        # Wait for the volume to be attached
        self.waitAttachVolume(instanceNo, volumeNo)
def stopVolumes(self, instanceNo) :
        # Get the volume information
awsVolumes = self.getAwsVolumes(instanceNo)
for volume in awsVolumes :
self.stopVolume(instanceNo, volume["VOLUME_NO"])
def stopVolume(self, instanceNo, volumeNo):
table = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(table.select(table.c.VOLUME_NO==volumeNo))
        # Skip if there is no volume ID
if (isEmpty(awsVolume["VOLUME_ID"])):
return
        # Skip if there is no instance ID
if (isEmpty(awsVolume["INSTANCE_ID"])) :
return;
try :
            # Detach the volume
            self.detachVolume(instanceNo, volumeNo)
            # Wait for the volume to be detached
            self.waitDetachVolume(instanceNo, volumeNo)
except Exception, e:
self.logger.error(traceback.format_exc())
            # If the data is inconsistent (e.g. the instance terminated abnormally), only log a warning and clean up
            self.logger.warn(e.message)
table = self.conn.getTable("AWS_VOLUME")
updateDict = self.conn.selectOne(table.select(table.c.VOLUME_NO==volumeNo))
updateDict["STATUS"] = "error"
updateDict["INSTANCE_ID"] = None
sql = table.update(table.c.VOLUME_NO ==updateDict["VOLUME_NO"], values=updateDict)
self.conn.execute(sql)
def getAwsVolumes(self, instanceNo) :
table = self.conn.getTable("AWS_VOLUME")
awsVolumes = self.conn.select(table.select(table.c.INSTANCE_NO==instanceNo))
        if not awsVolumes or len(awsVolumes) < 1:
            return awsVolumes
        # Check the platform
retVolumes = []
for awsVolume in awsVolumes:
            # If the platform number differs, the data is inconsistent, so log a warning
if (self.client.getPlatformNo() != awsVolume["PLATFORM_NO"]) :
self.logger.warn(None, "EPROCESS-000201",[awsVolume["VOLUME_NAME"], awsVolume["PLATFORM_NO"], self.client.getPlatformNo()])
else :
retVolumes.append(awsVolume)
return retVolumes;
def waitVolume(self, volumeId) :
        # Wait until the volume finishes processing
volume = None
while (True):
volume = self.client.describeVolume(volumeId);
status = volume.status
if status == "available" or status == "in-use" or status == "error":
break
if status != "creating" and status != "deleting" :
                # Unexpected status
raise IaasException("EPROCESS-000112", [volumeId, status,])
return volume;
def createVolume(self, instanceNo, volumeNo) :
tableAWSVOL = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
        # Create the volume
volume = self.client.createVolume(awsVolume["AVAILABILITY_ZONE"], awsVolume["SIZE"], awsVolume["SNAPSHOT_ID"])
        # Write an event log
tableCPNT = self.conn.getTable("COMPONENT")
component = self.conn.selectOne(tableCPNT.select(tableCPNT.c.COMPONENT_NO==awsVolume["COMPONENT_NO"]))
componentName = None
if component:
componentName = component["COMPONENT_NAME"]
tableINS = self.conn.getTable("INSTANCE")
instance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
self.conn.debug(instance["FARM_NO"], awsVolume["COMPONENT_NO"], componentName, instanceNo, instance["INSTANCE_NAME"], "AwsEbsCreate",["EC2",])
        # Update the database
updateDict = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
updateDict["VOLUME_ID"] = volume.volumeId
updateDict["STATUS"] = volume.status
sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==updateDict["VOLUME_NO"], values=updateDict)
self.conn.execute(sql)
def waitCreateVolume(self, instanceNo, volumeNo) :
tableAWSVOL = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
volumeId = awsVolume["VOLUME_ID"]
        # Wait for the volume to be created
volume = None
try :
volume = self.waitVolume(volumeId)
if volume.status != "available":
                # Volume creation failed
raise IaasException("EPROCESS-000113", [volumeId, volume.status,])
            # Write a log entry
self.logger.info(None, "IPROCESS-100122", [volumeId,])
except Exception:
self.logger.error(traceback.format_exc())
            # Volume creation failed
awsVolume["VOLUME_ID"] = None
awsVolume["STATUS"] = None
sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==awsVolume["VOLUME_NO"], values=awsVolume)
self.conn.execute(sql)
raise
        # Write an event log
tableCPNT = self.conn.getTable("COMPONENT")
component = self.conn.selectOne(tableCPNT.select(tableCPNT.c.COMPONENT_NO==awsVolume["COMPONENT_NO"]))
componentName = None
if component:
componentName = component["COMPONENT_NAME"]
tableINS = self.conn.getTable("INSTANCE")
instance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
self.conn.debug(instance["FARM_NO"], awsVolume["COMPONENT_NO"], componentName, instanceNo, instance["INSTANCE_NAME"],
"AwsEbsCreateFinish",["EC2", awsVolume["VOLUME_ID"], awsVolume["SIZE"]])
        # Update the database
updateDict = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
updateDict["STATUS"] = volume.status
sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==updateDict["VOLUME_NO"], values=updateDict)
self.conn.execute(sql)
def checkAvailableVolume(self, instanceNo, volumeNo) :
table = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(table.select(table.c.VOLUME_NO==volumeNo))
volumeId = awsVolume["VOLUME_ID"]
        # Check whether the volume is available
volume = self.client.describeVolume(volumeId);
if volume.status != "available":
            # The volume is not available
raise IaasException("EPROCESS-000114", [volumeId, volume.status,])
def attachVolume(self, instanceNo, volumeNo) :
        # Get the AWS_INSTANCE record
tableAWSINS = self.conn.getTable("AWS_INSTANCE")
awsInstance = self.conn.selectOne(tableAWSINS.select(tableAWSINS.c.INSTANCE_NO==instanceNo))
instanceId = awsInstance["INSTANCE_ID"]
tableAWSVOL = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
volumeId = awsVolume["VOLUME_ID"]
        # Write an event log
tableCPNT = self.conn.getTable("COMPONENT")
component = self.conn.selectOne(tableCPNT.select(tableCPNT.c.COMPONENT_NO==awsVolume["COMPONENT_NO"]))
componentName = None
if component:
componentName = component["COMPONENT_NAME"]
tableINS = self.conn.getTable("INSTANCE")
instance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
self.conn.debug(instance["FARM_NO"], awsVolume["COMPONENT_NO"], componentName, instanceNo, instance["INSTANCE_NAME"],
"AwsEbsAttach",[instance["INSTANCE_NAME"], awsVolume["VOLUME_ID"], awsVolume["DEVICE"]])
        # Attach the volume
self.client.attachVolume(volumeId, instanceId, awsVolume["DEVICE"])
        # Update the database
awsVolume["INSTANCE_ID"] = instanceId
sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==awsVolume["VOLUME_NO"], values=awsVolume)
self.conn.execute(sql)
def waitAttachVolume(self, instanceNo, volumeNo) :
tableAWSVOL = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
volumeId = awsVolume["VOLUME_ID"]
instanceId = awsVolume["INSTANCE_ID"]
volume = None
try :
            # TODO: temporary workaround because the attach status is not updated immediately
for i in range(0, 10):
volume = self.waitVolume(volumeId)
if volume.status == "in-use":
break
else:
time.sleep(10)
            # Check the status after the wait loop
if volume.status != "in-use":
                # The attach failed
raise IaasException("EPROCESS-000115", [instanceId, volumeId, volume.status,])
            # Write a log entry
self.logger.info(None, "IPROCESS-100124", [volumeId, instanceId,])
except Exception:
self.logger.error(traceback.format_exc())
            # The attach failed
updateDict = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
updateDict["STATUS"] = "error"
updateDict["INSTANCE_ID"] = None
sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==updateDict["VOLUME_NO"], values=updateDict)
self.conn.execute(sql)
raise
        # Write an event log
tableCPNT = self.conn.getTable("COMPONENT")
component = self.conn.selectOne(tableCPNT.select(tableCPNT.c.COMPONENT_NO==awsVolume["COMPONENT_NO"]))
componentName = None
if component:
componentName = component["COMPONENT_NAME"]
tableINS = self.conn.getTable("INSTANCE")
instance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
self.conn.debug(instance["FARM_NO"], awsVolume["COMPONENT_NO"], componentName, instanceNo, instance["INSTANCE_NAME"],
"AwsEbsAttachFinish",[instance["INSTANCE_NAME"], awsVolume["VOLUME_ID"], awsVolume["DEVICE"]])
        # Update the database
updateDict = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
updateDict["STATUS"] = volume.status
sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==updateDict["VOLUME_NO"], values=updateDict)
self.conn.execute(sql)
def detachVolume(self, instanceNo, volumeNo) :
tableAWSVOL = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
volumeId = awsVolume["VOLUME_ID"]
instanceId = awsVolume["INSTANCE_ID"]
device = awsVolume["DEVICE"]
        # Write an event log
tableCPNT = self.conn.getTable("COMPONENT")
component = self.conn.selectOne(tableCPNT.select(tableCPNT.c.COMPONENT_NO==awsVolume["COMPONENT_NO"]))
componentName = None
if component:
componentName = component["COMPONENT_NAME"]
tableINS = self.conn.getTable("INSTANCE")
instance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
self.conn.debug(instance["FARM_NO"], awsVolume["COMPONENT_NO"], componentName, instanceNo, instance["INSTANCE_NAME"],
"AwsEbsDetach",[instance["INSTANCE_NAME"], awsVolume["VOLUME_ID"], awsVolume["DEVICE"]])
        # Detach the volume
self.client.detachVolume(volumeId, instanceId, device);
def waitDetachVolume(self, instanceNo, volumeNo) :
tableAWSVOL = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
volumeId = awsVolume["VOLUME_ID"]
instanceId = awsVolume["INSTANCE_ID"]
volume = None
try :
volume = self.waitVolume(volumeId)
            # TODO: temporary workaround because the detach status is not updated immediately
for i in range(0, 10):
volume = self.waitVolume(volumeId)
if volume.status == "available":
break
else:
time.sleep(10)
            # Check the status after the wait loop
if volume.status != "available":
                # The detach failed
raise IaasException("EPROCESS-000116", [instanceId, volumeId, volume.status,])
            # Write a log entry
self.logger.info(None, "IPROCESS-100126", [volumeId, instanceId,])
except Exception:
self.logger.error(traceback.format_exc())
            # The detach failed
updateDict = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
updateDict["STATUS"] = "error"
updateDict["INSTANCE_ID"] = None
sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==updateDict["VOLUME_NO"], values=updateDict)
self.conn.execute(sql)
raise
        # Write an event log
tableCPNT = self.conn.getTable("COMPONENT")
component = self.conn.selectOne(tableCPNT.select(tableCPNT.c.COMPONENT_NO==awsVolume["COMPONENT_NO"]))
componentName = None
if component:
componentName = component["COMPONENT_NAME"]
tableINS = self.conn.getTable("INSTANCE")
instance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
self.conn.debug(instance["FARM_NO"], awsVolume["COMPONENT_NO"], componentName, instanceNo, instance["INSTANCE_NAME"],
"AwsEbsDetachFinish",[instance["INSTANCE_NAME"], awsVolume["VOLUME_ID"], awsVolume["DEVICE"]])
        # Update the database
updateDict = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
updateDict["STATUS"] = volume.status
updateDict["INSTANCE_ID"] = None
sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==updateDict["VOLUME_NO"], values=updateDict)
self.conn.execute(sql)
def createTag(self, volumeNo) :
        # Do not add tags for Eucalyptus
platform = self.platforminfo["platformName"]
if (platform == "eucalyptus"):
return
tableAWSVOL = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
tableCPNT = self.conn.getTable("COMPONENT")
component = self.conn.selectOne(tableCPNT.select(tableCPNT.c.COMPONENT_NO==awsVolume["COMPONENT_NO"]))
componentName = None
if component:
componentName = component["COMPONENT_NAME"]
tableINS = self.conn.getTable("INSTANCE")
instance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==awsVolume["INSTANCE_NO"]))
        # Build the string used for display
tagValue = str(instance["FQDN"]) + "_" + str(componentName)
tags = []
tags.append(TagSet(None, None, "Name", tagValue))
tags.append(TagSet(None, None, "ServiceName", componentName))
tags.append(TagSet(None, None, "UserName", self.client.getUsername()))
        # Add the Name tag
self.client.createTags(awsVolume["VOLUME_ID"], tags);
| yterauchi/primecloud-controller | iaas-gw/src/iaasgw/controller/ec2/ec2VolumController.py | Python | gpl-2.0 | 17,993 | 0.013944 |
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Top-level presubmit script for V8.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import sys
def _V8PresubmitChecks(input_api, output_api):
"""Runs the V8 presubmit checks."""
import sys
sys.path.append(input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools'))
from presubmit import CppLintProcessor
from presubmit import SourceProcessor
from presubmit import CheckGeneratedRuntimeTests
results = []
if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError("C++ lint check failed"))
if not SourceProcessor().Run(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"Copyright header, trailing whitespaces and two empty lines " \
"between declarations check failed"))
if not CheckGeneratedRuntimeTests(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"Generated runtime tests check failed"))
return results
def _CheckUnwantedDependencies(input_api, output_api):
"""Runs checkdeps on #include statements added in this
change. Breaking - rules is an error, breaking ! rules is a
warning.
"""
# We need to wait until we have an input_api object and use this
# roundabout construct to import checkdeps because this file is
# eval-ed and thus doesn't have __file__.
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(
input_api.PresubmitLocalPath(), 'buildtools', 'checkdeps')]
import checkdeps
from cpp_checker import CppChecker
from rules import Rule
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
added_includes = []
for f in input_api.AffectedFiles():
if not CppChecker.IsCppFile(f.LocalPath()):
continue
changed_lines = [line for line_num, line in f.ChangedContents()]
added_includes.append([f.LocalPath(), changed_lines])
deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
error_descriptions = []
warning_descriptions = []
for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
added_includes):
description_with_path = '%s\n %s' % (path, rule_description)
if rule_type == Rule.DISALLOW:
error_descriptions.append(description_with_path)
else:
warning_descriptions.append(description_with_path)
results = []
if error_descriptions:
results.append(output_api.PresubmitError(
'You added one or more #includes that violate checkdeps rules.',
error_descriptions))
if warning_descriptions:
results.append(output_api.PresubmitPromptOrNotify(
'You added one or more #includes of files that are temporarily\n'
'allowed but being removed. Can you avoid introducing the\n'
'#include? See relevant DEPS file(s) for details and contacts.',
warning_descriptions))
return results
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(input_api.canned_checks.CheckOwners(
input_api, output_api, source_file_filter=None))
results.extend(_V8PresubmitChecks(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
return results
def _SkipTreeCheck(input_api, output_api):
"""Check the env var whether we want to skip tree check.
Only skip if src/version.cc has been updated."""
src_version = 'src/version.cc'
FilterFile = lambda file: file.LocalPath() == src_version
if not input_api.AffectedSourceFiles(
lambda file: file.LocalPath() == src_version):
return False
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
def _CheckChangeLogFlag(input_api, output_api):
"""Checks usage of LOG= flag in the commit message."""
results = []
if input_api.change.BUG and not 'LOG' in input_api.change.tags:
results.append(output_api.PresubmitError(
'An issue reference (BUG=) requires a change log flag (LOG=). '
'Use LOG=Y for including this commit message in the change log. '
'Use LOG=N or leave blank otherwise.'))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
if not _SkipTreeCheck(input_api, output_api):
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
json_url='http://v8-status.appspot.com/current?format=json'))
return results
def GetPreferredTryMasters(project, change):
return {
'tryserver.v8': {
'v8_linux_rel': set(['defaulttests']),
'v8_linux_dbg': set(['defaulttests']),
'v8_linux_nosnap_rel': set(['defaulttests']),
'v8_linux_nosnap_dbg': set(['defaulttests']),
'v8_linux64_rel': set(['defaulttests']),
'v8_linux_arm_dbg': set(['defaulttests']),
'v8_linux_arm64_rel': set(['defaulttests']),
'v8_linux_layout_dbg': set(['defaulttests']),
'v8_mac_rel': set(['defaulttests']),
'v8_win_rel': set(['defaulttests']),
},
}
| nextsmsversion/macchina.io | platform/JS/V8/v8-3.28.4/PRESUBMIT.py | Python | apache-2.0 | 7,096 | 0.008737 |
"""
CacheItem interface:
'_id': string,
'url': string,
'response_url': string,
'body': string,
'head': string,
'response_code': int,
'cookies': None,#grab.response.cookies,
"""
from hashlib import sha1
import zlib
import logging
import marshal
import time
from grab.response import Response
from grab.cookie import CookieManager
from grab.util.py3k_support import *
logger = logging.getLogger('grab.spider.cache_backend.postgresql')
class CacheBackend(object):
def __init__(self, database, use_compression=True, spider=None, **kwargs):
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED
self.spider = spider
self.conn = psycopg2.connect(dbname=database, **kwargs)
self.conn.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)
self.cursor = self.conn.cursor()
res = self.cursor.execute("""
SELECT
TABLE_NAME
FROM
INFORMATION_SCHEMA.TABLES
WHERE
TABLE_TYPE = 'BASE TABLE'
AND
table_schema NOT IN ('pg_catalog', 'information_schema')"""
)
found = False
for row in self.cursor:
if row[0] == 'cache':
found = True
break
if not found:
self.create_cache_table()
def create_cache_table(self):
self.cursor.execute('BEGIN')
self.cursor.execute('''
CREATE TABLE cache (
id BYTEA NOT NULL CONSTRAINT primary_key PRIMARY KEY,
timestamp INT NOT NULL,
                data BYTEA NOT NULL
);
CREATE INDEX timestamp_idx ON cache (timestamp);
''')
self.cursor.execute('COMMIT')
def get_item(self, url, timeout=None):
"""
Returned item should have specific interface. See module docstring.
"""
_hash = self.build_hash(url)
with self.spider.save_timer('cache.read.postgresql_query'):
self.cursor.execute('BEGIN')
if timeout is None:
query = ""
else:
ts = int(time.time()) - timeout
query = " AND timestamp > %d" % ts
# py3 hack
if PY3K:
sql = '''
SELECT data
FROM cache
WHERE id = {0} %(query)s
''' % {'query': query}
else:
sql = '''
SELECT data
FROM cache
WHERE id = %%s %(query)s
''' % {'query': query}
res = self.cursor.execute(sql, (_hash,))
row = self.cursor.fetchone()
self.cursor.execute('COMMIT')
if row:
data = row[0]
return self.unpack_database_value(data)
else:
return None
def unpack_database_value(self, val):
with self.spider.save_timer('cache.read.unpack_data'):
dump = zlib.decompress(str(val))
return marshal.loads(dump)
def build_hash(self, url):
with self.spider.save_timer('cache.read.build_hash'):
if isinstance(url, unicode):
utf_url = url.encode('utf-8')
else:
utf_url = url
return sha1(utf_url).hexdigest()
def remove_cache_item(self, url):
_hash = self.build_hash(url)
self.cursor.execute('begin')
self.cursor.execute('''
            DELETE FROM cache WHERE id = %s
''', (_hash,))
self.cursor.execute('commit')
def load_response(self, grab, cache_item):
grab.fake_response(cache_item['body'])
body = cache_item['body']
def custom_prepare_response_func(transport, g):
response = Response()
response.head = cache_item['head']
response.body = body
response.code = cache_item['response_code']
response.download_size = len(body)
response.upload_size = 0
response.download_speed = 0
# Hack for deprecated behaviour
if 'response_url' in cache_item:
response.url = cache_item['response_url']
else:
logger.debug('You cache contains items without `response_url` key. It is depricated data format. Please re-download you cache or build manually `response_url` keys.')
response.url = cache_item['url']
response.parse()
response.cookies = CookieManager(transport.extract_cookiejar())
return response
grab.process_request_result(custom_prepare_response_func)
def save_response(self, url, grab):
body = grab.response.body
item = {
'url': url,
'response_url': grab.response.url,
'body': body,
'head': grab.response.head,
'response_code': grab.response.code,
'cookies': None,
}
self.set_item(url, item)
def set_item(self, url, item):
import psycopg2
_hash = self.build_hash(url)
data = self.pack_database_value(item)
self.cursor.execute('BEGIN')
ts = int(time.time())
# py3 hack
if PY3K:
sql = '''
UPDATE cache SET timestamp = {0}, data = {1} WHERE id = {2};
INSERT INTO cache (id, timestamp, data)
SELECT {2}, {0}, {1} WHERE NOT EXISTS (SELECT 1 FROM cache WHERE id = {2});
'''
else:
sql = '''
UPDATE cache SET timestamp = %s, data = %s WHERE id = %s;
INSERT INTO cache (id, timestamp, data)
SELECT %s, %s, %s WHERE NOT EXISTS (SELECT 1 FROM cache WHERE id = %s);
'''
res = self.cursor.execute(sql, (ts, psycopg2.Binary(data), _hash, _hash, ts, psycopg2.Binary(data), _hash))
self.cursor.execute('COMMIT')
def pack_database_value(self, val):
dump = marshal.dumps(val)
return zlib.compress(dump)
def clear(self):
self.cursor.execute('BEGIN')
self.cursor.execute('TRUNCATE cache')
self.cursor.execute('COMMIT')
def has_item(self, url, timeout=None):
"""
Test if required item exists in the cache.
"""
_hash = self.build_hash(url)
with self.spider.save_timer('cache.read.postgresql_query'):
if timeout is None:
query = ""
else:
ts = int(time.time()) - timeout
query = " AND timestamp > %d" % ts
res = self.cursor.execute('''
SELECT id
FROM cache
WHERE id = %%s %(query)s
LIMIT 1
''' % {'query': query},
(_hash,))
row = self.cursor.fetchone()
return True if row else False
| subeax/grab | grab/spider/cache_backend/postgresql.py | Python | mit | 6,990 | 0.001001 |
import websocket
import package
import thread
import time
import run
import random
import config
import dht
import logging
logging.basicConfig()
def on_message(ws, message):
#d = package.LoadPackage(message)
#res = run.PackageParser(d)
#ws.send(package.DumpPackage(res))
print message
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
def on_open(ws):
deviceConfig = config.DeviceConfig()
deviceConfig.Update("device.conf")
HSPackage = package.GenSH(deviceConfig)
#print HSPackage,123
ws.send(HSPackage)
def SendRandomData(*args):
while True:
humdi, temp = dht.GetData()
if ( humdi == -1 or temp == -1):
continue
dump = package.SensorDump(0, temp)
dump1 = package.SensorDump(1, humdi)
ws.send(dump)
ws.send(dump1)
time.sleep(1)
thread.start_new_thread(SendRandomData, ())
if __name__ == "__main__":
ws = websocket.WebSocketApp("ws://ali.imspace.cn:3000/device",
on_message = on_message,
on_error = on_error,
on_close = on_close)
ws.on_open = on_open
ws.run_forever()
| spacemeowx2/remote-web | client/client.py | Python | mit | 1,269 | 0.01576 |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for interacting with Google Compute Engine firewalls."""
import socket
from google.apputils import appcommands
import gflags as flags
from gcutil_lib import command_base
from gcutil_lib import gcutil_errors
from gcutil_lib import utils
FLAGS = flags.FLAGS
class FirewallCommand(command_base.GoogleComputeCommand):
"""Base command for working with the firewalls collection."""
print_spec = command_base.ResourcePrintSpec(
summary=['name', 'network'],
field_mappings=(
('name', 'name'),
('description', 'description'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
detail=(
('name', 'name'),
('description', 'description'),
('creation-time', 'creationTimestamp'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
sort_by='name')
resource_collection_name = 'firewalls'
def __init__(self, name, flag_values):
super(FirewallCommand, self).__init__(name, flag_values)
def GetDetailRow(self, result):
"""Returns an associative list of items for display in a detail table.
Args:
result: A dict returned by the server.
Returns:
A list.
"""
data = []
# Add the rules
for allowed in result.get('allowed', []):
as_string = str(allowed['IPProtocol'])
if allowed.get('ports'):
as_string += ': %s' % ', '.join(allowed['ports'])
data.append(('allowed', as_string))
return data
class FirewallRules(object):
"""Class representing the list of a firewall's rules.
This class is only used for parsing a firewall from command-line flags,
for printing the firewall, we simply dump the JSON.
"""
@staticmethod
def ParsePortSpecs(port_spec_strings):
"""Parse the port-specification portion of firewall rules.
This takes the value of the 'allowed' flag and builds the
corresponding firewall rules, excluding the 'source' fields.
Args:
port_spec_strings: A list of strings specifying the port-specific
components of a firewall rule. These are of the form
"(<protocol>)?(:<port>('-'<port>)?)?"
Returns:
A list of dict values containing a protocol string and a list
of port range strings. This is a substructure of the firewall
rule dictionaries, which additionally contain a 'source' field.
Raises:
ValueError: If any of the input strings are malformed.
"""
def _AddToPortSpecs(protocol, port_string, port_specs):
"""Ensure the specified rule for this protocol allows the given port(s).
If there is no port_string specified it implies all ports are allowed,
and whatever is in the port_specs map for that protocol get clobbered.
This method also makes sure that any protocol entry without a ports
member does not get further restricted.
Args:
protocol: The protocol under which the given port range is allowed.
port_string: The string specification of what ports are allowed.
port_specs: The mapping from protocols to firewall rules.
"""
port_spec_entry = port_specs.setdefault(protocol,
{'IPProtocol': str(protocol),
'ports': []})
if 'ports' in port_spec_entry:
# We only handle the 'then' case because in the other case the
# existing entry already allows all ports.
if not port_string:
# A missing 'ports' field indicates all ports are allowed.
port_spec_entry.pop('ports')
else:
port_spec_entry['ports'].append(port_string)
port_specs = {}
for port_spec_string in port_spec_strings:
protocol = None
port_string = None
parts = port_spec_string.split(':')
if len(parts) > 2:
raise ValueError('Invalid allowed entry: %s' %
port_spec_string)
elif len(parts) == 2:
if parts[0]:
protocol = utils.ParseProtocol(parts[0])
port_string = utils.ReplacePortNames(parts[1])
else:
protocol = utils.ParseProtocol(parts[0])
if protocol:
_AddToPortSpecs(protocol, port_string, port_specs)
else:
# Add entries for both UPD and TCP
_AddToPortSpecs(socket.getprotobyname('tcp'), port_string, port_specs)
_AddToPortSpecs(socket.getprotobyname('udp'), port_string, port_specs)
return port_specs.values()
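  # Illustrative sketch (not part of the original file): given flag values such
  # as ['tcp:80', 'udp:5000-6000', 'icmp'], ParsePortSpecs returns entries
  # roughly of the form [{'IPProtocol': '6', 'ports': ['80']},
  # {'IPProtocol': '17', 'ports': ['5000-6000']}, {'IPProtocol': '1'}], where
  # the protocol numbers come from socket.getprotobyname and a missing 'ports'
  # field means that all ports are allowed for that protocol.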
def __init__(self, allowed, allowed_ip_sources):
self.port_specs = FirewallRules.ParsePortSpecs(allowed)
self.source_ranges = allowed_ip_sources
self.source_tags = []
self.target_tags = []
def SetTags(self, source_tags, target_tags):
self.source_tags = sorted(set(source_tags))
self.target_tags = sorted(set(target_tags))
def AddToFirewall(self, firewall):
if self.source_ranges:
firewall['sourceRanges'] = self.source_ranges
if self.source_tags:
firewall['sourceTags'] = self.source_tags
if self.target_tags:
firewall['targetTags'] = self.target_tags
firewall['allowed'] = self.port_specs
class AddFirewall(FirewallCommand):
"""Create a new firewall rule to allow incoming traffic to a network."""
positional_args = '<firewall-name>'
def __init__(self, name, flag_values):
super(AddFirewall, self).__init__(name, flag_values)
flags.DEFINE_string('description',
'',
'An optional Firewall description.',
flag_values=flag_values)
flags.DEFINE_string('network',
'default',
'Specifies which network this firewall applies to.',
flag_values=flag_values)
flags.DEFINE_list('allowed',
None,
'[Required] Specifies a list of allowed ports for this '
'firewall. Each entry must be a combination of the '
'protocol and the port or port range in the following '
'form: \'<protocol>:<port>-<port>\' or '
'\'<protocol>:<port>\'. To specify multiple ports, '
'protocols, or ranges, provide them as comma'
'-separated entries. For example: '
'\'--allowed=tcp:ssh,udp:5000-6000,tcp:80,icmp\'.',
flag_values=flag_values)
flags.DEFINE_list('allowed_ip_sources',
[],
'Specifies a list of IP addresses that are allowed '
'to talk to instances within the network, through the '
'<protocols>:<ports> described by the \'--allowed\' '
'flag. If no IP or tag sources are listed, all sources '
'will be allowed.',
flag_values=flag_values)
flags.DEFINE_list('allowed_tag_sources',
[],
'Specifies a list of instance tags that are allowed to '
'talk to instances within the network, through the '
'<protocols>:<ports> described by the \'--allowed\' '
'flag. If specifying multiple tags, provide them as '
'comma-separated entries. For example, '
'\'--allowed_tag_sources=www,database,frontend\'. '
'If no tag or ip sources are listed, all sources will '
'be allowed.',
flag_values=flag_values)
flags.DEFINE_list('target_tags',
[],
'Specifies a set of tagged instances that this '
'firewall applies to. To specify multiple tags, '
'provide them as comma-separated entries. If no tags '
'are listed, this firewall applies to all instances in '
'the network.',
flag_values=flag_values)
def Handle(self, firewall_name):
"""Add the specified firewall.
Args:
firewall_name: The name of the firewall to add.
Returns:
The result of inserting the firewall.
Raises:
gcutil_errors.CommandError: If the passed flag values cannot be
interpreted.
"""
if not self._flags.allowed:
raise gcutil_errors.CommandError(
'You must specify at least one rule through --allowed.')
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
firewall_name)
firewall_resource = {
'kind': self._GetResourceApiKind('firewall'),
'name': firewall_context['firewall'],
'description': self._flags.description,
}
if self._flags.network is not None:
firewall_resource['network'] = self._context_parser.NormalizeOrPrompt(
'networks', self._flags.network)
if (not self._flags.allowed_ip_sources and
not self._flags.allowed_tag_sources):
self._flags.allowed_ip_sources.append('0.0.0.0/0')
try:
firewall_rules = FirewallRules(self._flags.allowed,
self._flags.allowed_ip_sources)
firewall_rules.SetTags(self._flags.allowed_tag_sources,
self._flags.target_tags)
firewall_rules.AddToFirewall(firewall_resource)
firewall_request = self.api.firewalls.insert(
project=firewall_context['project'], body=firewall_resource)
return firewall_request.execute()
except ValueError, e:
raise gcutil_errors.CommandError(e)
class GetFirewall(FirewallCommand):
"""Get a firewall."""
positional_args = '<firewall-name>'
def __init__(self, name, flag_values):
super(GetFirewall, self).__init__(name, flag_values)
def Handle(self, firewall_name):
"""Get the specified firewall.
Args:
firewall_name: The name of the firewall to get.
Returns:
The result of getting the firewall.
"""
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
firewall_name)
firewall_request = self.api.firewalls.get(
project=firewall_context['project'],
firewall=firewall_context['firewall'])
return firewall_request.execute()
class DeleteFirewall(FirewallCommand):
"""Delete one or more firewall rules.
Specify multiple firewalls as multiple arguments. The firewalls will be
deleted in parallel.
"""
positional_args = '<firewall-name-1> ... <firewall-name-n>'
safety_prompt = 'Delete firewall'
def __init__(self, name, flag_values):
super(DeleteFirewall, self).__init__(name, flag_values)
def Handle(self, *firewall_names):
"""Delete the specified firewall.
Args:
*firewall_names: The names of the firewalls to delete.
Returns:
Tuple (results, exceptions) - results of deleting the firewalls.
"""
requests = []
for name in firewall_names:
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
name)
requests.append(self.api.firewalls.delete(
project=firewall_context['project'],
firewall=firewall_context['firewall']))
results, exceptions = self.ExecuteRequests(requests)
return (self.MakeListResult(results, 'operationList'), exceptions)
class ListFirewalls(FirewallCommand, command_base.GoogleComputeListCommand):
"""List the firewall rules for a project."""
def ListFunc(self):
"""Returns the function for listing firewalls."""
return self.api.firewalls.list
def AddCommands():
appcommands.AddCmd('addfirewall', AddFirewall)
appcommands.AddCmd('getfirewall', GetFirewall)
appcommands.AddCmd('deletefirewall', DeleteFirewall)
appcommands.AddCmd('listfirewalls', ListFirewalls)
| harshilasu/LinkurApp | y/google-cloud-sdk/platform/gcutil/lib/google_compute_engine/gcutil_lib/firewall_cmds.py | Python | gpl-3.0 | 12,747 | 0.005021 |
import os
import os.path
from freight.constants import PROJECT_ROOT
from freight.exceptions import CommandError
class UnknownRevision(CommandError):
pass
class Vcs(object):
ssh_connect_path = os.path.join(PROJECT_ROOT, "bin", "ssh-connect")
def __init__(self, workspace, url, username=None):
self.url = url
self.username = username
self.workspace = workspace
self._path_exists = None
@property
def path(self):
return self.workspace.path
def get_default_env(self):
return {}
def run(self, command, capture=False, workspace=None, *args, **kwargs):
if workspace is None:
workspace = self.workspace
if not self.exists(workspace=workspace):
kwargs.setdefault("cwd", None)
env = kwargs.pop("env", {})
for key, value in self.get_default_env().items():
env.setdefault(key, value)
env.setdefault("FREIGHT_SSH_REPO", self.url)
kwargs["env"] = env
if capture:
handler = workspace.capture
else:
handler = workspace.run
rv = handler(command, *args, **kwargs)
if isinstance(rv, bytes):
rv = rv.decode("utf8")
if isinstance(rv, str):
return rv.strip()
return rv
def exists(self, workspace=None):
if workspace is None:
workspace = self.workspace
return os.path.exists(workspace.path)
def clone_or_update(self):
if self.exists():
self.update()
else:
self.clone()
def clone(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
def checkout(self, ref):
raise NotImplementedError
def get_sha(self, ref):
"""
Given a `ref` return the fully qualified version.
"""
raise NotImplementedError
def get_default_revision(self):
raise NotImplementedError
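# A hedged sketch of what a concrete backend could look like (the git commands
# and method bodies below are illustrative assumptions; the real Git/Mercurial
# implementations live in sibling modules, not here):
#
#   class GitVcs(Vcs):
#       def clone(self):
#           self.run(["git", "clone", "--mirror", self.url, self.path])
#
#       def update(self):
#           self.run(["git", "fetch", "--all", "-p"])
#
#       def get_sha(self, ref):
#           return self.run(["git", "rev-parse", ref], capture=True)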
| getsentry/freight | freight/vcs/base.py | Python | apache-2.0 | 1,987 | 0 |
"""Top level site urls."""
from django.conf.urls import patterns, include, url
from quiz2 import views
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', views.home, name='home'),
url(r'^register/$', views.register, name='register'),
url(r'^login/$', views.user_login, name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),
url(r'^admin/', include(admin.site.urls)),
url(r'^quiz/', include('quiz2.apps.quiz.urls',
app_name='quizapp', namespace='quizapp'
)),
url(r'^user/account/$', views.user_account, name='user_account'),
url(r'^user/password/reset/$',
'django.contrib.auth.views.password_reset',
{'post_reset_redirect' : '/user/password/reset/done/'},
name="password_reset"),
(r'^user/password/reset/done/$',
'django.contrib.auth.views.password_reset_done'),
(r'^user/password/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
'django.contrib.auth.views.password_reset_confirm',
{'post_reset_redirect' : '/user/password/done/'}),
(r'^user/password/done/$',
'django.contrib.auth.views.password_reset_complete'),
)
| hillscottc/quiz2 | quiz2/urls.py | Python | agpl-3.0 | 1,275 | 0.002353 |
def get_weekday(current_weekday, days_ahead):
""" (int, int) -> int
Return which day of the week it will be days_ahead days from
current_weekday.
current_weekday is the current day of the week and is in the range 1-7,
indicating whether today is Sunday (1), Monday (2), ..., Saturday (7).
days_ahead is the number of days after today.
>>> get_weekday(3, 1)
4
>>> get_weekday(6, 1)
7
>>> get_weekday(7, 1)
1
>>> get_weekday(1, 0)
1
>>> get_weekday(4, 7)
4
>>> get_weekday(7, 72)
2
"""
    return (current_weekday + days_ahead - 1) % 7 + 1
def days_difference(day1, day2):
""" (int, int) -> int
Return the number of days between day1 and day2, which are both
in the range 1-365 (thus indicating the day of the year).
>>> days_difference(200, 224)
24
>>> days_difference(50, 50)
0
>>> days_difference(100, 99)
-1
"""
return day2 - day1
def get_birthday_weekday(current_weekday, current_day, birthday_day):
""" (int, int, int) -> int
Return the day of the week it will be on birthday_day, given that
the day of the week is current_weekday and the day of the year is
current_day.
current_weekday is the current day of the week and is in the range 1-7,
indicating whether today is Sunday (1), Monday (2), ..., Saturday (7).
current_day and birthday_day are both in the range 1-365.
>>> get_birthday_weekday(5, 3, 4)
6
>>> get_birthday_weekday(5, 3, 116)
6
>>> get_birthday_weekday(6, 116, 3)
5
"""
days_diff = days_difference(current_day, birthday_day)
return get_weekday(current_weekday, days_diff)
| simontakite/sysadmin | pythonscripts/practicalprogramming/functions/days_bad.py | Python | gpl-2.0 | 1,684 | 0.000594 |
from .base import * # noqa
INTERNAL_IPS = INTERNAL_IPS + ('', )
ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'app_kdl_liv',
'USER': 'app_kdl',
'PASSWORD': '',
'HOST': ''
},
}
# -----------------------------------------------------------------------------
# GLOBALS FOR JS
# -----------------------------------------------------------------------------
# Google Analytics ID
GA_ID = 'UA-67707155-1'
# -----------------------------------------------------------------------------
# Django Extensions
# http://django-extensions.readthedocs.org/en/latest/
# -----------------------------------------------------------------------------
try:
import django_extensions # noqa
INSTALLED_APPS = INSTALLED_APPS + ('django_extensions',)
except ImportError:
pass
# -----------------------------------------------------------------------------
# Local settings
# -----------------------------------------------------------------------------
try:
from .local import * # noqa
except ImportError:
pass
| kingsdigitallab/kdl-django | kdl/settings/liv.py | Python | mit | 1,127 | 0 |
# -*- coding: utf-8 -*-
from django.conf.urls import url
from projects import views
urlpatterns = [
url(r'^$', views.MainPageView.as_view(), name='main'),
# Inlist
url(r'^inlist/$', views.InlistView.as_view(), name='inlist'),
url(r'^inlist/(?P<pk>[0-9]+)/delete/$', views.InlistItemDelete.as_view(),
name='delete_inlist'),
url(r'^inlist/(?P<pk>[0-9]+)/convert/action/$',
views.InlistItemToActionView.as_view(), name='convert_inlist_action'),
url(r'^inlist/(?P<inlistitem>[0-9]+)/convert/project/$',
views.CreateProjectView.as_view(), name='convert_inlist_project'),
# Actions
url(r'^actions/(?P<pk>[0-9]+)/delete/$',
views.ActionlistItemDelete.as_view(), name='delete_actionlist'),
url(r'^actions/(?P<pk>[0-9]+)/complete/$',
views.ActionCompleteView.as_view(), name='complete_action'),
url(r'^actions/(?P<pk>[0-9]+)/edit/$', views.EditActionView.as_view(),
name='edit_action'),
# Projects
url(r'^project/(?P<pk>[0-9]+)/$', views.ProjectView.as_view(),
name='project'),
url(r'^project/create/$', views.CreateProjectView.as_view(),
name='create_project'),
    url(r'^project/(?P<pk>[0-9]+)/edit/$', views.EditProjectView.as_view(),
        name='edit_project'),
    url(r'^project/(?P<pk>[0-9]+)/delete/$', views.DeleteProjectView.as_view(),
        name='delete'),
    url(r'^sort/actions/$', views.ActionlistSortView.as_view(),
        name='sort_actions'),
]
| XeryusTC/projman | projects/urls.py | Python | mit | 1,477 | 0 |
from pydevin import *
import math
# ball parameters definitions
BALL_POS_Y_MAX = 115
BALL_POS_Y_MIN = 5
BALL_POS_Y_CENTER = (BALL_POS_Y_MAX + BALL_POS_Y_MIN) / 2.0
BALL_POS_X_MAX = 125
BALL_POS_X_MIN = 20
BALL_POS_X_CENTER = (BALL_POS_X_MAX + BALL_POS_X_MIN) / 2.0
A_X = -1.0/(BALL_POS_X_MAX - BALL_POS_X_CENTER)
B_X = -(A_X)*BALL_POS_X_CENTER
A_Y = -1.0/(BALL_POS_Y_MIN - BALL_POS_Y_CENTER)
B_Y = -(A_Y)*BALL_POS_Y_CENTER
# ball tracking
x_buffer = [ 0 for i in range(16) ]
y_buffer = [ 0 for i in range(16) ]
total_sum_x = 0
total_sum_y = 0
curr_index = 0
# end of ball tracking
# ball parameters
x_pos = 0
y_pos = 0
f_x_pos = 0.0
f_y_pos = 0.0
# end of ball parameters
pos_computed = 0
# motor params
alpha_x = 1 # l=1500, h=2500
beta_x = 0
alpha_y = 1 # l=1500, h=2500
beta_y = 0
pdev = PyDevin()
pdev.init()
def norm(x, y):
return math.sqrt(x*x + y*y)
def r_range(v, l, h):
if(v < l):
return l
elif(v > h):
return h
return v
# ball tracking
# normalizes ball position
def normalize_ball_params():
global A_X, A_Y, B_X, B_Y, f_x_pos, f_y_pos
f_x_pos = r_range(A_X*x_pos + B_X, -1.0, 1.0)
f_y_pos = r_range(A_Y*y_pos + B_Y, -1.0, 1.0)
def compute_pos(x_cur, y_cur):
global pos_computed, x_pos, y_pos, curr_index, total_sum_x, total_sum_y, x_buffer, y_buffer
if(pos_computed == 0 or norm(x_pos - x_cur, y_pos - y_cur) < 100):
# this is a very efficient way to average
# over 16 position samples without any sum
# or division
total_sum_x = total_sum_x - x_buffer[curr_index] + x_cur
x_buffer[curr_index] = x_cur
total_sum_y = total_sum_y - y_buffer[curr_index] + y_cur
y_buffer[curr_index] = y_cur
x_pos = total_sum_x >> 4 # division by 16
y_pos = total_sum_y >> 4
normalize_ball_params()
if(pos_computed == 0 and curr_index == 15):
pos_computed = 1
curr_index = (curr_index + 1) % 16
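# The update above is a constant-time moving average: a 16-slot circular buffer
# plus a running sum means each sample costs one subtraction and one addition,
# and the mean is a bit shift (>> 4) rather than a division. A minimal
# standalone sketch of the same idea (names are illustrative only):
#
#   buf, total, idx = [0] * 16, 0, 0
#   def push(sample):
#       global total, idx
#       total = total - buf[idx] + sample   # drop oldest sample, add newest
#       buf[idx] = sample
#       idx = (idx + 1) % 16
#       return total >> 4                   # mean of the last 16 samples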
def cameraEvent():
global pdev
key = pdev.get_camera()
# raw position extraction
y_cur = ((key & 0x7F))
x_cur = (((key >> 8) & 0x7F))
pol = (key >> 7) & 0x01
check = (key >> 15) & 0x01
if(pol == 1):
compute_pos(x_cur, y_cur)
normalize_ball_params()
def getCamera():
global f_x_pos, f_y_pos
return (f_x_pos, f_y_pos)
def setMotorRange(l, h):
global alpha_x, beta_x, alpha_y, beta_y
alpha_x = 1000.0 / (h - l)
beta_x = 1680.0 - 1000.0*(l/(h-l))
alpha_y = 1000.0 / (h - l)
beta_y = 1450.0 - 1000.0*(l/(h-l))
def sendCommand(x, y):
global alpha_x, beta_x, alpha_y, beta_y
f_x = alpha_x*x + beta_x
f_y = alpha_y*y + beta_y
pdev.send_motor(int(f_y) | (int(f_x) << 16 ))
| QJonny/spin_emulator | pydevin/devinManager.py | Python | lgpl-2.1 | 2,573 | 0.026817 |
# -*- coding: utf-8 -*-
# © 2016 Cyril Gaudin (Camptocamp)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import api, fields, models
class MrpConfigSettings(models.TransientModel):
""" Add settings for dismantling BOM.
"""
_inherit = 'mrp.config.settings'
dismantling_product_choice = fields.Selection([
(0, "Main BOM product will be set randomly"),
(1, "User have to choose which component to set as main BOM product")
], "Dismantling BOM")
@api.multi
def get_default_dismantling_product_choice(self, fields):
product_choice = self.env["ir.config_parameter"].get_param(
'mrp.bom.dismantling.product_choice', default=0
)
return {'dismantling_product_choice': product_choice}
@api.multi
def set_dismantling_product_choice(self):
self.env["ir.config_parameter"].set_param(
'mrp.bom.dismantling.product_choice',
self.dismantling_product_choice
)
| houssine78/addons | mrp_bom_dismantling/models/res_config.py | Python | agpl-3.0 | 1,008 | 0 |
'''
9.5 First, you have to resolve assignment9_3. This is slightly different.
This program records the domain name (instead of the address) where the message
was sent from instead of who the mail came from (i.e., the whole email address).
At the end of the program, print out the contents of your dictionary.
Sample:
python assignment9_5_dictionary.py
{'media.berkeley.edu': 4, 'uct.ac.za': 6, 'umich.edu': 7,
'gmail.com': 1, 'caret.cam.ac.uk': 1, 'iupui.edu': 8}
'''
dDomain = dict()
try:
flHand = open("mbox-short.txt")
except:
print('There is no "mbox-short.txt" file in the same folder as this script.')
else:
for sLine in flHand:
if not sLine.startswith('From '):
continue
lWords = sLine.split()
lEmailDomain = lWords[1].split('@')
dDomain[lEmailDomain[1]] = dDomain.get(lEmailDomain[1], 0) + 1
print (dDomain)
| hosseinoliabak/learningpy | 09_5_dictionary.py | Python | gpl-3.0 | 877 | 0.004561 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Reference_Domain.external_id'
db.add_column(u'django_reference_data_reference_domain', 'external_id',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Reference_Domain.guid'
db.add_column(u'django_reference_data_reference_domain', 'guid',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Reference_Domain.external_id'
db.delete_column(u'django_reference_data_reference_domain', 'external_id')
# Deleting field 'Reference_Domain.guid'
db.delete_column(u'django_reference_data_reference_domain', 'guid')
models = {
u'django_reference_data.postal_code': {
'Meta': {'object_name': 'Postal_Code'},
'admin_code1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_code2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_code3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_name1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_name2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_name3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'lat_long_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'place_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'django_reference_data.reference_domain': {
'Meta': {'object_name': 'Reference_Domain'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'domain_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'domain_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'domain_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_multimedia': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_news': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'long_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'source_details': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['django_reference_data'] | jonathanmorgan/django_reference_data | migrations/0005_auto__add_field_reference_domain_external_id__add_field_reference_doma.py | Python | gpl-3.0 | 5,852 | 0.00769 |
import os
import sys
import json
import yaml
import time
import tempfile
import inspect
import warnings
import re
import math
import codecs
class StimelaCabRuntimeError(RuntimeError):
pass
class StimelaProcessRuntimeError(RuntimeError):
pass
CPUS = 1
from .xrun_poll import xrun
def assign(key, value):
frame = inspect.currentframe().f_back
frame.f_globals[key] = value
def readJson(conf):
with open(conf, "r") as _std:
jdict = yaml.safe_load(_std)
return jdict
def writeJson(config, dictionary):
with codecs.open(config, 'w', 'utf8') as std:
std.write(json.dumps(dictionary, ensure_ascii=False))
def get_Dockerfile_base_image(image):
if os.path.isfile(image):
dockerfile = image
else:
dockerfile = "{:s}/Dockerfile".format(image)
with open(dockerfile, "r") as std:
_from = ""
for line in std.readlines():
if line.startswith("FROM"):
_from = line
return _from
def change_Dockerfile_base_image(path, _from, label, destdir="."):
if os.path.isfile(path):
dockerfile = path
dirname = os.path.dirname(path)
else:
dockerfile = "{:s}/Dockerfile".format(path)
dirname = path
with open(dockerfile, "r") as std:
lines = std.readlines()
for line in lines:
if line.startswith("FROM"):
lines.remove(line)
temp_dir = tempfile.mkdtemp(
prefix="tmp-stimela-{:s}-".format(label), dir=destdir)
xrun(
"cp", ["-r", "{:s}/Dockerfile {:s}/src".format(dirname, dirname), temp_dir])
dockerfile = "{:s}/Dockerfile".format(temp_dir)
with open(dockerfile, "w") as std:
std.write("{:s}\n".format(_from))
for line in lines:
std.write(line)
return temp_dir, dockerfile
def get_base_images(logfile, index=1):
    with open(logfile, "r") as std:
string = std.read()
separator = "[================================DONE==========================]"
log = string.split(separator)[index-1]
images = []
for line in log.split("\n"):
if line.find("<=BASE_IMAGE=>") > 0:
tmp = line.split("<=BASE_IMAGE=>")[-1]
image, base = tmp.split("=")
images.append((image.strip(), base))
return images
def icasa(taskname, mult=None, clearstart=False, loadthese=[], **kw0):
"""
runs a CASA task given a list of options.
    A given task can be run multiple times with different options,
in this case the options must be parsed as a list/tuple of dictionaries via mult, e.g
icasa('exportfits',mult=[{'imagename':'img1.image','fitsimage':'image1.fits},{'imagename':'img2.image','fitsimage':'image2.fits}]).
    Options you want to be common between the multiple commands should be specified as keyword args.
"""
# create temp directory from which to run casapy
td = tempfile.mkdtemp(dir='.')
# we want get back to the working directory once casapy is launched
cdir = os.path.realpath('.')
# load modules in loadthese
_load = ""
if "os" not in loadthese or "import os" not in loadthese:
loadthese.append("os")
if loadthese:
exclude = filter(lambda line: line.startswith("import")
or line.startswith("from"), loadthese)
for line in loadthese:
if line not in exclude:
line = "import %s" % line
_load += "%s\n" % line
if mult:
if isinstance(mult, (tuple, list)):
for opts in mult:
opts.update(kw0)
else:
            mult.update(kw0)
mult = [mult]
else:
mult = [kw0]
run_cmd = """ """
for kw in mult:
task_cmds = []
for key, val in kw.items():
if isinstance(val, (str, unicode)):
val = '"%s"' % val
task_cmds .append('%s=%s' % (key, val))
task_cmds = ", ".join(task_cmds)
run_cmd += """
%s
os.chdir('%s')
%s
%s(%s)
""" % (_load, cdir, "clearstart()" if clearstart else "", taskname, task_cmds)
tf = tempfile.NamedTemporaryFile(suffix='.py')
tf.write(run_cmd)
tf.flush()
t0 = time.time()
# all logging information will be in the pyxis log files
print("Running {}".format(run_cmd))
xrun("cd", [td, "&& casa --nologger --log2term --nologfile -c", tf.name])
# log taskname.last
task_last = '%s.last' % taskname
if os.path.exists(task_last):
        with open(task_last, 'r') as last:
print('%s.last is: \n %s' % (taskname, last.read()))
# remove temp directory. This also gets rid of the casa log files; so long suckers!
xrun("rm", ["-fr ", td, task_last])
tf.close()
def stack_fits(fitslist, outname, axis=0, ctype=None, keep_old=False, fits=False):
""" Stack a list of fits files along a given axiis.
fitslist: list of fits file to combine
outname: output file name
axis: axis along which to combine the files
fits: If True will axis FITS ordering axes
ctype: Axis label in the fits header (if given, axis will be ignored)
keep_old: Keep component files after combining?
"""
import numpy
try:
import pyfits
except ImportError:
warnings.warn(
"Could not find pyfits on this system. FITS files will not be stacked")
sys.exit(0)
hdu = pyfits.open(fitslist[0])[0]
hdr = hdu.header
naxis = hdr['NAXIS']
# find axis via CTYPE key
if ctype is not None:
for i in range(1, naxis+1):
if hdr['CTYPE%d' % i].lower().startswith(ctype.lower()):
axis = naxis - i # fits to numpy convention
elif fits:
axis = naxis - axis
fits_ind = abs(axis-naxis)
crval = hdr['CRVAL%d' % fits_ind]
imslice = [slice(None)]*naxis
_sorted = sorted([pyfits.open(fits) for fits in fitslist],
key=lambda a: a[0].header['CRVAL%d' % (naxis-axis)])
# define structure of new FITS file
nn = [hd[0].header['NAXIS%d' % (naxis-axis)] for hd in _sorted]
shape = list(hdu.data.shape)
shape[axis] = sum(nn)
data = numpy.zeros(shape, dtype=float)
for i, hdu0 in enumerate(_sorted):
h = hdu0[0].header
d = hdu0[0].data
imslice[axis] = range(sum(nn[:i]), sum(nn[:i+1]))
data[imslice] = d
if crval > h['CRVAL%d' % fits_ind]:
crval = h['CRVAL%d' % fits_ind]
# update header
hdr['CRVAL%d' % fits_ind] = crval
hdr['CRPIX%d' % fits_ind] = 1
pyfits.writeto(outname, data, hdr, clobber=True)
print("Successfully stacked images. Output image is %s" % outname)
# remove old files
if not keep_old:
for fits in fitslist:
os.system('rm -f %s' % fits)
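# Hedged usage sketch for stack_fits (the file names are hypothetical; the
# planes are assumed to share a grid and differ only along the frequency axis
# labelled 'FREQ' in their headers):
#
#   stack_fits(["plane-0000.fits", "plane-0001.fits", "plane-0002.fits"],
#              "cube.fits", ctype="FREQ", keep_old=True)
#
# Passing ctype lets the function find the stacking axis from the headers
# instead of relying on a hard-coded axis index.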
def substitute_globals(string, globs=None):
sub = set(re.findall('\{(.*?)\}', string))
globs = globs or inspect.currentframe().f_back.f_globals
if sub:
for item in map(str, sub):
string = string.replace("${%s}" % item, globs[item])
return string
else:
return False
def get_imslice(ndim):
imslice = []
for i in xrange(ndim):
if i < ndim-2:
imslice.append(0)
else:
imslice.append(slice(None))
return imslice
def addcol(msname, colname=None, shape=None,
data_desc_type='array', valuetype=None, init_with=0, **kw):
""" add column to MS
msanme : MS to add colmn to
colname : column name
shape : shape
valuetype : data type
data_desc_type : 'scalar' for scalar elements and array for 'array' elements
init_with : value to initialise the column with
"""
import numpy
import pyrap.tables
tab = pyrap.tables.table(msname, readonly=False)
try:
tab.getcol(colname)
print('Column already exists')
except RuntimeError:
print('Attempting to add %s column to %s' % (colname, msname))
from pyrap.tables import maketabdesc
valuetype = valuetype or 'complex'
if shape is None:
dshape = list(tab.getcol('DATA').shape)
shape = dshape[1:]
if data_desc_type == 'array':
from pyrap.tables import makearrcoldesc
# God forbid this (or the TIME) column doesn't exist
coldmi = tab.getdminfo('DATA')
coldmi['NAME'] = colname.lower()
tab.addcols(maketabdesc(makearrcoldesc(
colname, init_with, shape=shape, valuetype=valuetype)), coldmi)
elif data_desc_type == 'scalar':
from pyrap.tables import makescacoldesc
coldmi = tab.getdminfo('TIME')
coldmi['NAME'] = colname.lower()
tab.addcols(maketabdesc(makescacoldesc(
colname, init_with, valuetype=valuetype)), coldmi)
print('Column added successfuly.')
if init_with:
nrows = dshape[0]
rowchunk = nrows//10 if nrows > 1000 else nrows
for row0 in range(0, nrows, rowchunk):
nr = min(rowchunk, nrows-row0)
dshape[0] = nr
tab.putcol(colname, numpy.ones(
dshape, dtype=valuetype)*init_with, row0, nr)
tab.close()
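# Hedged usage sketch for addcol (the MS path and column name are placeholders):
#
#   addcol("observation.ms", colname="CORRECTED_DATA",
#          data_desc_type="array", valuetype="complex")
#
# With no explicit shape the new column inherits the shape of the DATA column,
# which is the usual choice for visibility columns.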
def sumcols(msname, col1=None, col2=None, outcol=None, cols=None, subtract=False):
""" add col1 to col2, or sum columns in 'cols' list.
If subtract, subtract col2 from col1
"""
from pyrap.tables import table
tab = table(msname, readonly=False)
if cols:
data = 0
for col in cols:
data += tab.getcol(col)
else:
if subtract:
data = tab.getcol(col1) - tab.getcol(col2)
else:
data = tab.getcol(col1) + tab.getcol(col2)
    nrows = tab.nrows()
    rowchunk = nrows//10 if nrows > 1000 else nrows
for row0 in range(0, nrows, rowchunk):
nr = min(rowchunk, nrows-row0)
tab.putcol(outcol, data[row0:row0+nr], row0, nr)
tab.close()
def copycol(msname, fromcol, tocol):
from pyrap.tables import table
tab = table(msname, readonly=False)
data = tab.getcol(fromcol)
if tocol not in tab.colnames():
addcol(msname, tocol)
nrows = tab.nrows()
rowchunk = nrows//10 if nrows > 5000 else nrows
for row0 in range(0, nrows, rowchunk):
nr = min(rowchunk, nrows-row0)
tab.putcol(tocol, data[row0:row0+nr], row0, nr)
tab.close()
def cab_dict_update(dictionary, key=None, value=None, options=None):
if options is None:
options = {key: value}
for key, value in options.items():
dictionary[key] = dictionary.pop(key, None) or value
return dictionary
def compute_vis_noise(msname, sefd, spw_id=0):
"""Computes nominal per-visibility noise"""
from pyrap.tables import table
tab = table(msname)
spwtab = table(msname + "/SPECTRAL_WINDOW")
freq0 = spwtab.getcol("CHAN_FREQ")[spw_id, 0]
wavelength = 300e+6/freq0
bw = spwtab.getcol("CHAN_WIDTH")[spw_id, 0]
dt = tab.getcol("EXPOSURE", 0, 1)[0]
dtf = (tab.getcol("TIME", tab.nrows()-1, 1)-tab.getcol("TIME", 0, 1))[0]
# close tables properly, else the calls below will hang waiting for a lock...
tab.close()
spwtab.close()
print(">>> %s freq %.2f MHz (lambda=%.2fm), bandwidth %.2g kHz, %.2fs integrations, %.2fh synthesis" % (
msname, freq0*1e-6, wavelength, bw*1e-3, dt, dtf/3600))
noise = sefd/math.sqrt(abs(2*bw*dt))
print(">>> SEFD of %.2f Jy gives per-visibility noise of %.2f mJy" %
(sefd, noise*1000))
return noise
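# Hedged usage sketch for compute_vis_noise (the MS name and SEFD value are
# placeholders, not defaults taken from this module):
#
#   sigma = compute_vis_noise("observation.ms", sefd=400.0)
#
# The return value is the per-visibility noise in Jy, sefd / sqrt(2 * bw * dt),
# so wider channels or longer integrations lower the noise.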
| SpheMakh/Stimela | stimela/utils/__init__.py | Python | gpl-2.0 | 11,642 | 0.002233 |
from django.contrib import admin
from achievs.models import Achievement
# from achievs.models import Gold
# from achievs.models import Silver
# from achievs.models import Bronze
# from achievs.models import Platinum
from achievs.models import Level
# class PlatinumInline(admin.StackedInline):
# model=Platinum
# class GoldInline(admin.StackedInline):
# model=Gold
# class SilverInline(admin.StackedInline):
# model=Silver
# class BronzeInline(admin.StackedInline):
# model=Bronze
class LevelInline(admin.StackedInline):
model=Level
class AchievementAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['name']}),
('Date information', {'fields': ['pub_date']}),
]
#inlines=[GoldInline, SilverInline, BronzeInline, PlatinumInline]
inlines=[LevelInline]
list_display = ('name', 'pub_date')
list_filter=['pub_date']
search_fields=['name']
date_hierarchy='pub_date'
# admin.site.register(Gold)
# admin.site.register(Silver)
# admin.site.register(Bronze)
# admin.site.register(Platinum)
admin.site.register(Level)
admin.site.register(Achievement, AchievementAdmin) | eawerbaneth/Scoreboard | achievs/admin.py | Python | bsd-3-clause | 1,116 | 0.031362 |
# ============================================================================
#
# Copyright (c) 2007-2010 Integral Technology Solutions Pty Ltd,
# All Rights Reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
# LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS
# END USER LICENSE AGREEMENT (ELUA).
#
# ============================================================================
##
## persist.py
##
## This script contains functions that manipulate persistent stores.
#=======================================================================================
# Global variables
#=======================================================================================
persistModule = '1.2.0'
log.debug('Loading module [persist.py] version [' + persistModule + ']')
#=======================================================================================
# Configure filestores
#=======================================================================================
def createFileStores(resourcesProperties, domainProperties):
fileStores=resourcesProperties.getProperty('persistent.filestores')
if fileStores is None or len(fileStores)==0:
log.info('Persistent Store is not specified, skipping.')
else:
fileStoreList=fileStores.split(',')
for fileStore in fileStoreList:
__createFileStore(fileStore, resourcesProperties, domainProperties)
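# Illustrative resources.properties fragment consumed by createFileStores and
# __createFileStore (names, paths and targets below are examples only, not
# shipped defaults; the Target value refers to a wls.server.<key> entry in the
# domain properties):
#
#   persistent.filestores=jmsStore
#   persistent.filestore.jmsStore.Name=JMSFileStore
#   persistent.filestore.jmsStore.Location=/u01/app/stores/jms
#   persistent.filestore.jmsStore.Target=server1
#   persistent.filestore.jmsStore.Migratable=true
#   persistent.filestore.jmsStore.Replace=false
#
# Each comma-separated entry in persistent.filestores gets its own
# persistent.filestore.<entry>.* block.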
#=======================================================================================
# Configure filestore
#=======================================================================================
def __createFileStore(fileStore, resourcesProperties, domainProperties):
fileStoreName=resourcesProperties.getProperty('persistent.filestore.' + str(fileStore) + '.Name')
fileStoreLocation=resourcesProperties.getProperty('persistent.filestore.' + str(fileStore) + '.Location')
tmpTarget=resourcesProperties.getProperty('persistent.filestore.' + str(fileStore) + '.Target')
migratable=resourcesProperties.getProperty('persistent.filestore.' + str(fileStore) + '.Migratable')
replaceFlag=resourcesProperties.getProperty('persistent.filestore.' + str(fileStore) + '.Replace')
if replaceFlag is None:
replaceFlag = 'false'
targetServerName = None
try:
fileStore = None
fileStoreExist = 0
try:
cd('/')
fileStore = lookup(fileStoreName, 'FileStore')
except Exception, error:
log.info('Unable to find filestore [' + str(fileStoreName) + '], trying to create new one.')
if fileStore is None:
cd('/')
fileStore = create(fileStoreName, 'FileStore')
if tmpTarget is None or len(tmpTarget)==0:
targetServerName=domainProperties.getProperty('wls.admin.name')
targetServer = lookup(targetServerName, 'Server')
else:
targetServerName=domainProperties.getProperty('wls.server.' + str(tmpTarget) + '.name')
if migratable.upper()=='TRUE':
targetServerName = targetServerName + ' (migratable)'
targetServer = lookup(targetServerName, 'MigratableTarget')
else:
targetServer = lookup(targetServerName, 'Server')
try:
fileStore.addTarget(targetServer)
except Exception, error:
cancelEdit('y')
raise ScriptError, 'Unable to add filestore [' + str(fileStoreName) + '] to target server [' + str(targetServerName) + '] : ' + str(error)
else:
if not migratable is None and migratable.upper()=='TRUE' and isUpdateToPreviouslyCreatedDomain().upper()=='TRUE':
targetsArray = fileStore.getTargets()
for i in range(0, len(targetsArray)):
targetName = targetsArray[i].getName()
# If current target is not migratable
if targetName.find("(migratable)") < 0:
newTargetName = targetName + ' (migratable)'
targetServer = lookup(newTargetName, 'MigratableTarget')
jmsServersArray = cmo.getJMSServers()
for j in range(0, len(jmsServersArray)):
currentJMSServer = jmsServersArray[j]
currentPersistentStore = currentJMSServer.getPersistentStore()
if not currentPersistentStore is None and currentPersistentStore.getName()==fileStore.getName():
log.info('Upgrading target [' + targetName + '] in JMS Server [' + currentJMSServer.getName() + '] to migratable')
currentJMSServer.setTargets(jarray.array([targetServer], weblogic.management.configuration.MigratableTargetMBean))
log.info('Upgrading target [' + targetName + '] to migratable for persistent store [' + str(fileStore.getName()) + ']')
fileStore.setTargets(jarray.array([targetServer], weblogic.management.configuration.MigratableTargetMBean))
safAgents = cmo.getSAFAgents()
for k in range(0, len(safAgents)):
safAgent = safAgents[k]
safAgentTargets = safAgent.getTargets()
newSafAgentsArray = zeros(len(safAgentTargets), weblogic.management.configuration.MigratableTargetMBean)
for l in range(0, len(safAgentTargets)):
safAgentTarget = safAgentTargets[l]
safAgentTargetName = safAgentTarget.getName()
# If current target is not migratable
if safAgentTargetName.find("(migratable)") < 0:
newSafAgentTargetName = safAgentTargetName + ' (migratable)'
newSafAgentTarget = lookup(newSafAgentTargetName, 'MigratableTarget')
log.info('Setting migratable target [' + newSafAgentTarget.getName() + '] for SAF Agent [' + safAgent.getName() + '].')
newSafAgentsArray[l] = newSafAgentTarget
else:
log.info('Setting migratable target [' + safAgentTarget.getName() + '] for SAF Agent [' + safAgent.getName() + '].')
newSafAgentsArray[l] = safAgentTarget
log.info('Updating migratable targets for SAF Agent [' + safAgent.getName() + '].')
safAgent.setTargets(newSafAgentsArray)
fileStoreExist = 1
log.info('FileStore [' + str(fileStoreName) + '] already exists, checking REPLACE flag.')
if not fileStoreExist or isReplaceRequired(domainProperties.getProperty('REPLACE')) or replaceFlag.upper()=='TRUE':
if fileStoreExist and isReplaceRequired(domainProperties.getProperty('REPLACE')):
log.info('REPLACE flag is specified, start replacing FileStore [' + str(fileStoreName) + '] properties.')
file = File(fileStoreLocation)
if not file.exists():
if file.mkdirs():
log.info('File store directory [' + str(fileStoreLocation) + '] has been created successfully.')
fileStore.setDirectory(fileStoreLocation)
except Exception, error:
cancelEdit('y')
raise ScriptError, 'Unable to create filestore [' + str(fileStoreName) + '] for target server [' + str(targetServerName) + '] : ' + str(error)
| Integral-Technology-Solutions/ConfigNOW | wlst/persist.py | Python | mit | 8,036 | 0.015928 |
d = {}
for i in range(100000):
d[i] = i
JS_CODE = '''
var d = {};
for (var i = 0; i < 100000; i++) {
d[i] = i;
}
'''
| kikocorreoso/brython | www/speed/benchmarks/add_dict.py | Python | bsd-3-clause | 124 | 0 |
from __future__ import unicode_literals
from django.apps import AppConfig
class PaypalConfig(AppConfig):
name = 'paypal'
| linkingcharities/linkingcharities | Linking_Charities/payment/apps.py | Python | mit | 128 | 0 |
import matplotlib
matplotlib.use('WXAgg')
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import CoolProp
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(2, 2))
ax = fig.add_subplot(111, projection='3d')
NT = 1000
NR = 1000
rho, t = np.logspace(np.log10(2e-3), np.log10(1100), NR), np.linspace(275.15, 700, NT)
RHO, T = np.meshgrid(rho, t)
P = CoolProp.CoolProp.PropsSI('P', 'D', RHO.reshape((NR * NT, 1)), 'T', T.reshape((NR * NT, 1)), 'REFPROP-Water').reshape(NT, NR)
Tsat = np.linspace(273.17, 647.0, 100)
psat = CoolProp.CoolProp.PropsSI('P', 'Q', 0, 'T', Tsat, 'Water')
rhoL = CoolProp.CoolProp.PropsSI('D', 'Q', 0, 'T', Tsat, 'Water')
rhoV = CoolProp.CoolProp.PropsSI('D', 'Q', 1, 'T', Tsat, 'Water')
ax.plot_surface(np.log(RHO), T, np.log(P), cmap=cm.jet, edgecolor='none')
ax.plot(np.log(rhoL), Tsat, np.log(psat), color='k', lw=2)
ax.plot(np.log(rhoV), Tsat, np.log(psat), color='k', lw=2)
ax.text(0.3, 800, 22, "CoolProp", size=12)
ax.set_frame_on(False)
ax.set_axis_off()
ax.view_init(22, -136)
ax.set_xlabel(r'$\ln\rho$ ')
ax.set_ylabel('$T$')
ax.set_zlabel('$p$')
plt.tight_layout()
plt.savefig('_static/PVTCP.png', transparent=True)
plt.savefig('_static/PVTCP.pdf', transparent=True)
plt.close()
| CoolProp/CoolProp | Web/scripts/logo_2013.py | Python | mit | 1,263 | 0.005542 |
#!/usr/bin/env python
from __future__ import absolute_import
import os
import shutil
import tempfile
from distutils.core import setup
from .Dependencies import cythonize, extended_iglob
from ..Utils import is_package_dir
from ..Compiler import Options
try:
import multiprocessing
parallel_compiles = int(multiprocessing.cpu_count() * 1.5)
except ImportError:
multiprocessing = None
parallel_compiles = 0
class _FakePool(object):
def map_async(self, func, args):
from itertools import imap
for _ in imap(func, args):
pass
def close(self): pass
def terminate(self): pass
def join(self): pass
def parse_directives(option, name, value, parser):
dest = option.dest
old_directives = dict(getattr(parser.values, dest,
Options.directive_defaults))
directives = Options.parse_directive_list(
value, relaxed_bool=True, current_settings=old_directives)
setattr(parser.values, dest, directives)
def parse_options(option, name, value, parser):
dest = option.dest
options = dict(getattr(parser.values, dest, {}))
for opt in value.split(','):
if '=' in opt:
n, v = opt.split('=', 1)
v = v.lower() not in ('false', 'f', '0', 'no')
else:
n, v = opt, True
options[n] = v
setattr(parser.values, dest, options)
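# For illustration (a hypothetical command line; the option names are
# placeholders): "-s annotate=true,embedsignature=no" reaches parse_options as
# the string "annotate=true,embedsignature=no" and is stored as
# {'annotate': True, 'embedsignature': False}. Every "name=value" pair is
# coerced to a boolean (anything other than false/f/0/no counts as True), and a
# bare "-s some_flag" simply becomes {'some_flag': True}.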
def find_package_base(path):
base_dir, package_path = os.path.split(path)
while os.path.isfile(os.path.join(base_dir, '__init__.py')):
base_dir, parent = os.path.split(base_dir)
package_path = '%s/%s' % (parent, package_path)
return base_dir, package_path
def cython_compile(path_pattern, options):
pool = None
paths = map(os.path.abspath, extended_iglob(path_pattern))
try:
for path in paths:
if options.build_inplace:
base_dir = path
while not os.path.isdir(base_dir) or is_package_dir(base_dir):
base_dir = os.path.dirname(base_dir)
else:
base_dir = None
if os.path.isdir(path):
# recursively compiling a package
paths = [os.path.join(path, '**', '*.%s' % ext)
for ext in ('py', 'pyx')]
else:
# assume it's a file(-like thing)
paths = [path]
ext_modules = cythonize(
paths,
nthreads=options.parallel,
exclude_failures=options.keep_going,
exclude=options.excludes,
compiler_directives=options.directives,
force=options.force,
quiet=options.quiet,
**options.options)
if ext_modules and options.build:
if len(ext_modules) > 1 and options.parallel > 1:
if pool is None:
try:
pool = multiprocessing.Pool(options.parallel)
except OSError:
pool = _FakePool()
pool.map_async(run_distutils, [
(base_dir, [ext]) for ext in ext_modules])
else:
run_distutils((base_dir, ext_modules))
except:
if pool is not None:
pool.terminate()
raise
else:
if pool is not None:
pool.close()
pool.join()
def run_distutils(args):
base_dir, ext_modules = args
script_args = ['build_ext', '-i']
cwd = os.getcwd()
temp_dir = None
try:
if base_dir:
os.chdir(base_dir)
temp_dir = tempfile.mkdtemp(dir=base_dir)
script_args.extend(['--build-temp', temp_dir])
setup(
script_name='setup.py',
script_args=script_args,
ext_modules=ext_modules,
)
finally:
if base_dir:
os.chdir(cwd)
if temp_dir and os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
def parse_args(args):
from optparse import OptionParser
parser = OptionParser(usage='%prog [options] [sources and packages]+')
parser.add_option('-X', '--directive', metavar='NAME=VALUE,...', dest='directives',
type=str, action='callback', callback=parse_directives, default={},
help='set a compiler directive')
parser.add_option('-s', '--option', metavar='NAME=VALUE', dest='options',
type=str, action='callback', callback=parse_options, default={},
help='set a cythonize option')
parser.add_option('-3', dest='python3_mode', action='store_true',
help='use Python 3 syntax mode by default')
parser.add_option('-x', '--exclude', metavar='PATTERN', dest='excludes',
action='append', default=[],
help='exclude certain file patterns from the compilation')
parser.add_option('-b', '--build', dest='build', action='store_true',
help='build extension modules using distutils')
parser.add_option('-i', '--inplace', dest='build_inplace', action='store_true',
help='build extension modules in place using distutils (implies -b)')
parser.add_option('-j', '--parallel', dest='parallel', metavar='N',
type=int, default=parallel_compiles,
                      help=('run builds in N parallel jobs (default: %d)' %
                            (parallel_compiles or 1)))
parser.add_option('-f', '--force', dest='force', action='store_true',
help='force recompilation')
parser.add_option('-q', '--quiet', dest='quiet', action='store_true',
help='be less verbose during compilation')
parser.add_option('--lenient', dest='lenient', action='store_true',
help='increase Python compatibility by ignoring some compile time errors')
parser.add_option('-k', '--keep-going', dest='keep_going', action='store_true',
help='compile as much as possible, ignore compilation failures')
options, args = parser.parse_args(args)
if not args:
parser.error("no source files provided")
if options.build_inplace:
options.build = True
if multiprocessing is None:
options.parallel = 0
if options.python3_mode:
options.options['language_level'] = 3
return options, args
def main(args=None):
options, paths = parse_args(args)
if options.lenient:
# increase Python compatibility by ignoring compile time errors
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
for path in paths:
cython_compile(path, options)
if __name__ == '__main__':
main()
| thedrow/cython | Cython/Build/Cythonize.py | Python | apache-2.0 | 6,882 | 0.001453 |
# -*- coding: utf-8 -*-
"""
Running the Gibbs sampler on flowcymetry data
http://www.physics.orst.edu/~rubin/nacphy/lapack/linear.html
matlab time: Elapsed time is 1.563538 seconds.
improve sample_mu:
python: 0.544 0.005 0.664
cython_admi: 0.469 0.005 0.493
moved_index_in_cython: 0.148 0.002 0.217 (most time is highmem)
changed_index 0.136 0.001 0.174
removed_higmem: 0.048 0.000 0.048
improve sample_sigma:
python: 0.544 0.005 0.664
cython_admi: 0.313 0.003 0.364
moved_index_in_cython: 0.145 0.001 0.199
changed_index : 0.074 0.000 0.081 (used BLAS matrix calc)
changed to syrk : 0.060 0.000 0.067
to profile use:
%prun main(K=5):
ncalls tottime percall cumtime percall filename:lineno(function)
500 0.358 0.001 0.358 0.001 rng_cython.pyx:262(calc_lik)
100 0.297 0.003 0.297 0.003 rng_cython.pyx:291(calc_exp_normalize)
500 0.159 0.000 0.167 0.000 rng_cython.pyx:129(sample_mix_sigma_zero_mean)
100 0.145 0.001 0.199 0.002 GMM.py:40(sample_mu)
1 0.099 0.099 0.218 0.218 npyio.py:628(loadtxt)
500 0.053 0.000 0.053 0.000 rng_cython.pyx:169(sample_mu)
100 0.052 0.001 0.052 0.001 rng_cython.pyx:238(draw_x)
100 0.045 0.000 0.700 0.007 GMM.py:90(compute_ProbX)
59998/29999 0.037 0.000 0.040 0.000 npyio.py:772(pack_items)
30000 0.026 0.000 0.048 0.000 npyio.py:788(split_line)
507 0.018 0.000 0.018 0.000 {method 'reduce' of 'numpy.ufunc' objects}
60000 0.017 0.000 0.017 0.000 {method 'split' of 'str' objects}
100 0.015 0.000 0.034 0.000 GMM.py:208(sample_p)
12 0.014 0.001 0.014 0.001 {numpy.core.multiarray.array}
29999 0.012 0.000 0.012 0.000 {zip}
%prun main_python(K=5)
ncalls tottime percall cumtime percall filename:lineno(function)
10707 0.584 0.000 0.584 0.000 {method 'reduce' of 'numpy.ufunc' objects}
100 0.574 0.006 2.195 0.022 GMM.py:149(sample_x)
100 0.544 0.005 0.664 0.007 GMM.py:176(sample_mu)
100 0.499 0.005 1.295 0.013 GMM.py:219(compute_ProbX)
100 0.334 0.003 0.549 0.005 GMM.py:189(sample_sigma)
3501 0.310 0.000 0.310 0.000 {numpy.core._dotblas.dot}
16112 0.252 0.000 0.252 0.000 {numpy.core.multiarray.array}
1 0.101 0.101 0.223 0.223 npyio.py:628(loadtxt)
100 0.048 0.000 0.048 0.000 {method 'cumsum' of 'numpy.ndarray' objects}
59998/29999 0.038 0.000 0.041 0.000 npyio.py:772(pack_items)
Created on Fri Jun 20 16:52:31 2014
@author: jonaswallin
"""
from __future__ import division
import numpy as np
from BayesFlow import mixture
import BayesFlow.PurePython.GMM as GMM
from matplotlib import pyplot as plt
import numpy.random as npr
import time
K = 5
def main(K= 5):
sim = 100
data = np.ascontiguousarray(np.loadtxt('../data/flowcym.dat',skiprows=1,usecols=(1,2,3,4,5,6)))
mix = mixture(data,K,high_memory=True)
t0 = time.time()
for i in range(sim): # @UnusedVariable
mix.sample()
t1 = time.time()
print("mixture took %.4f sec"%(t1-t0))
def main_python(K = 5):
sim = 100
data = np.ascontiguousarray(np.loadtxt('../data/flowcym.dat',skiprows=1,usecols=(1,2,3,4,5,6)))
mix = GMM.mixture(data,K)
t0 = time.time()
for i in range(sim): # @UnusedVariable
mix.sample()
t1 = time.time()
print("mixture took %.4f sec"%(t1-t0))
if __name__ == '__main__':
sim = 10
data = np.ascontiguousarray(np.loadtxt('../data/flowcym.dat',skiprows=1,usecols=(1,2,3,4,5,6)))
mix = mixture(data, K)
mus = np.zeros((sim,2*data.shape[1]))
t0 = time.time()
for i in range(sim):
mix.sample()
mus[i,:data.shape[1]] = mix.mu[0]
mus[i,data.shape[1]:] = mix.mu[1]
t1 = time.time()
if 1:
for k in range(mix.K):
plt.plot(mix.data[mix.x==k,0],mix.data[mix.x==k,1],'o')
plt.figure()
for k in range(mix.K):
plt.plot(mus[:,(2*k):(2*(k+1))])
plt.show()
print("mixture took %.4f sec"%(t1-t0))
mix2 = GMM.mixture(data,K)
mus = np.zeros((sim,4))
t0 = time.time()
for i in range(sim):
mix2.sample()
t1 = time.time()
print("Python mixture took %.4f sec"%(t1-t0))
if 0:
import pstats, cProfile
import pyximport
pyximport.install()
import bayesianmixture.distributions.rng_cython as rng_cython
#cProfile.runctx("rng_cython.sample_mu_rep(np.sum(mix.data[mix.x == 0 ,:],1),mix.sigma[0],mix.prior[0]['mu']['theta'].reshape(mix.d),mix.prior[0]['mu']['sigma'],npr.rand(mix.d),10000)", globals(), locals(), "Profile.prof")
cProfile.runctx("for k in range(100): mix.sample_mu()", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats() | JonasWallin/BayesFlow | examples/flowcymetry_normalMixture.py | Python | gpl-2.0 | 5,242 | 0.014117 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2017-02-12 06:08
from __future__ import unicode_literals
from django.db import migrations
import fossevents.users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_user_is_moderator'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', fossevents.users.models.CustomUserManager()),
],
),
]
| fossevents/fossevents.in | fossevents/users/migrations/0005_auto_20170212_1138.py | Python | mit | 504 | 0 |
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template, Markup
from . import public, admin
from .extensions import *
from .config import Config
import logging
#extensions
def getPackage(num):
packages = {
"0": "No Package",
"1": "Basic Package",
"2": "Deluxe Package",
"3": "Ultimate Blast Package",
"4": "Party Package",
"5": "Holiday Package",
"6": "Behind the Scenes Package"
}
return packages[str(num)]
def formatHours(hour):
if hour <= 12:
return str(hour) + " A.M."
else:
return str(hour - 12) + " P.M."
_js_escapes = {
'\\': '\\u005C',
'\'': '\\u0027',
'"': '\\u0022',
'>': '\\u003E',
'<': '\\u003C',
'&': '\\u0026',
'=': '\\u003D',
'-': '\\u002D',
';': '\\u003B',
u'\u2028': '\\u2028',
u'\u2029': '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update(('%c' % z, '\\u%04X' % z) for z in xrange(32))
def jinja2_escapejs_filter(value):
retval = []
for letter in value:
if _js_escapes.has_key(letter):
retval.append(_js_escapes[letter])
else:
retval.append(letter)
return Markup("".join(retval))
#creates and returna a flask app instance
def create_app(config_object=Config):
app = Flask(__name__)
app.config.from_object(config_object)
register_extensions(app)
register_jinja_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
setup_logging(app)
return app
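# Hedged usage sketch: a separate entry point (e.g. a wsgi.py, which is an
# assumption and not part of this file) would typically do something like:
#
#   from app.main import create_app
#   application = create_app()
#   # gunicorn wsgi:application      (production)
#   # application.run(debug=True)    (local development)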
#set up gunicorn logging in production
def setup_logging(app):
if not app.debug:
# In production mode, add log handler to sys.stderr.
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
return None
#register flask extensions
def register_extensions(app):
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
return None
#register blue prints to the app
def register_blueprints(app):
app.register_blueprint(public.views.blueprint)
app.register_blueprint(admin.views.blueprint)
return None
#add jinja extensions
def register_jinja_extensions(app):
def get_year(*args): #returns the current year
import datetime
now = datetime.datetime.now()
return now.year
app.jinja_env.filters['currentYear'] = get_year #creates a filter that returns the current year
app.jinja_env.filters['escapejs'] = jinja2_escapejs_filter
app.jinja_env.globals.update(formatHours=formatHours)
app.jinja_env.globals.update(getPackage=getPackage)
return None
#register error handlers
def register_errorhandlers(app):
def render_error(error):
error_code = getattr(error, 'code', 500)
print error_code
if error_code == 404:
return render_template("notfound.html", error=error_code), error_code
else:
return render_template("error.html", error=error_code), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None | rileymjohnson/fbla | app/main.py | Python | mit | 3,324 | 0.008724 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for the module module, which contains Module and related classes."""
import os
import unittest
from tvcm import fake_fs
from tvcm import module
from tvcm import resource_loader
from tvcm import project as project_module
class ModuleIntegrationTests(unittest.TestCase):
def test_module(self):
fs = fake_fs.FakeFS()
fs.AddFile('/src/x.html', """
<!DOCTYPE html>
<link rel="import" href="/y.html">
<link rel="import" href="/z.html">
<script>
'use strict';
</script>
""")
fs.AddFile('/src/y.html', """
<!DOCTYPE html>
<link rel="import" href="/z.html">
""")
fs.AddFile('/src/z.html', """
<!DOCTYPE html>
""")
fs.AddFile('/src/tvcm.html', '<!DOCTYPE html>')
with fs:
project = project_module.Project([os.path.normpath('/src/')])
loader = resource_loader.ResourceLoader(project)
x_module = loader.LoadModule('x')
self.assertEquals([loader.loaded_modules['y'],
loader.loaded_modules['z']],
x_module.dependent_modules)
already_loaded_set = set()
load_sequence = []
x_module.ComputeLoadSequenceRecursive(load_sequence, already_loaded_set)
self.assertEquals([loader.loaded_modules['z'],
loader.loaded_modules['y'],
x_module],
load_sequence)
def testBasic(self):
fs = fake_fs.FakeFS()
fs.AddFile('/x/src/my_module.html', """
<!DOCTYPE html>
<link rel="import" href="/tvcm/foo.html">
});
""")
fs.AddFile('/x/tvcm/foo.html', """
<!DOCTYPE html>
});
""")
project = project_module.Project([os.path.normpath('/x')])
loader = resource_loader.ResourceLoader(project)
with fs:
my_module = loader.LoadModule(module_name='src.my_module')
dep_names = [x.name for x in my_module.dependent_modules]
self.assertEquals(['tvcm.foo'], dep_names)
def testDepsExceptionContext(self):
fs = fake_fs.FakeFS()
fs.AddFile('/x/src/my_module.html', """
<!DOCTYPE html>
<link rel="import" href="/tvcm/foo.html">
""")
fs.AddFile('/x/tvcm/foo.html', """
<!DOCTYPE html>
<link rel="import" href="missing.html">
""")
project = project_module.Project([os.path.normpath('/x')])
loader = resource_loader.ResourceLoader(project)
with fs:
exc = None
try:
loader.LoadModule(module_name='src.my_module')
assert False, 'Expected an exception'
except module.DepsException, e:
exc = e
self.assertEquals(
['src.my_module', 'tvcm.foo'],
exc.context)
def testGetAllDependentFilenamesRecursive(self):
fs = fake_fs.FakeFS()
fs.AddFile('/x/y/z/foo.html', """
<!DOCTYPE html>
<link rel="import" href="/z/foo2.html">
<link rel="stylesheet" href="/z/foo.css">
<script src="/bar.js"></script>
""")
fs.AddFile('/x/y/z/foo.css', """
.x .y {
background-image: url(foo.jpeg);
}
""")
fs.AddFile('/x/y/z/foo.jpeg', '')
fs.AddFile('/x/y/z/foo2.html', """
<!DOCTYPE html>
""")
fs.AddFile('/x/raw/bar.js', 'hello')
project = project_module.Project([
os.path.normpath('/x/y'), os.path.normpath('/x/raw/')])
loader = resource_loader.ResourceLoader(project)
with fs:
my_module = loader.LoadModule(module_name='z.foo')
self.assertEquals(1, len(my_module.dependent_raw_scripts))
dependent_filenames = my_module.GetAllDependentFilenamesRecursive()
self.assertEquals(
[
os.path.normpath('/x/y/z/foo.html'),
os.path.normpath('/x/raw/bar.js'),
os.path.normpath('/x/y/z/foo.css'),
os.path.normpath('/x/y/z/foo.jpeg'),
os.path.normpath('/x/y/z/foo2.html'),
],
dependent_filenames)
| dstockwell/catapult | tracing/third_party/tvcm/tvcm/module_unittest.py | Python | bsd-3-clause | 3,914 | 0.005876 |
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
MAP_INVOICE_TYPE_PARTNER_TYPE = {
'out_invoice': 'customer',
'out_refund': 'customer',
'in_invoice': 'supplier',
'in_refund': 'supplier',
}
# Since invoice amounts are unsigned, this is how we know if money comes in or goes out
MAP_INVOICE_TYPE_PAYMENT_SIGN = {
'out_invoice': 1,
'in_refund': -1,
'in_invoice': -1,
'out_refund': 1,
}
class account_payment_method(models.Model):
_name = "account.payment.method"
_description = "Payment Methods"
name = fields.Char(required=True, translate=True)
code = fields.Char(required=True) # For internal identification
payment_type = fields.Selection([('inbound', 'Inbound'), ('outbound', 'Outbound')], required=True)
class account_abstract_payment(models.AbstractModel):
_name = "account.abstract.payment"
_description = "Contains the logic shared between models which allows to register payments"
payment_type = fields.Selection([('outbound', 'Send Money'), ('inbound', 'Receive Money')], string='Payment Type', required=True)
payment_method_id = fields.Many2one('account.payment.method', string='Payment Method Type', required=True, oldname="payment_method",
help="Manual: Get paid by cash, check or any other method outside of Odoo.\n"\
"Electronic: Get paid automatically through a payment acquirer by requesting a transaction on a card saved by the customer when buying or subscribing online (payment token).\n"\
"Check: Pay bill by check and print it from Odoo.\n"\
"Batch Deposit: Encash several customer checks at once by generating a batch deposit to submit to your bank. When encoding the bank statement in Odoo, you are suggested to reconcile the transaction with the batch deposit.To enable batch deposit,module account_batch_deposit must be installed.\n"\
"SEPA Credit Transfer: Pay bill from a SEPA Credit Transfer file you submit to your bank. To enable sepa credit transfer, module account_sepa must be installed ")
payment_method_code = fields.Char(related='payment_method_id.code',
help="Technical field used to adapt the interface to the payment type selected.", readonly=True)
partner_type = fields.Selection([('customer', 'Customer'), ('supplier', 'Vendor')])
partner_id = fields.Many2one('res.partner', string='Partner')
amount = fields.Monetary(string='Payment Amount', required=True)
currency_id = fields.Many2one('res.currency', string='Currency', required=True, default=lambda self: self.env.user.company_id.currency_id)
payment_date = fields.Date(string='Payment Date', default=fields.Date.context_today, required=True, copy=False)
communication = fields.Char(string='Memo')
journal_id = fields.Many2one('account.journal', string='Payment Journal', required=True, domain=[('type', 'in', ('bank', 'cash'))])
company_id = fields.Many2one('res.company', related='journal_id.company_id', string='Company', readonly=True)
hide_payment_method = fields.Boolean(compute='_compute_hide_payment_method',
help="Technical field used to hide the payment method if the selected journal has only one available which is 'manual'")
@api.one
@api.constrains('amount')
def _check_amount(self):
if self.amount < 0:
raise ValidationError(_('The payment amount cannot be negative.'))
@api.multi
@api.depends('payment_type', 'journal_id')
def _compute_hide_payment_method(self):
for payment in self:
if not payment.journal_id:
payment.hide_payment_method = True
continue
journal_payment_methods = payment.payment_type == 'inbound'\
and payment.journal_id.inbound_payment_method_ids\
or payment.journal_id.outbound_payment_method_ids
payment.hide_payment_method = len(journal_payment_methods) == 1 and journal_payment_methods[0].code == 'manual'
@api.onchange('journal_id')
def _onchange_journal(self):
if self.journal_id:
self.currency_id = self.journal_id.currency_id or self.company_id.currency_id
# Set default payment method (we consider the first to be the default one)
payment_methods = self.payment_type == 'inbound' and self.journal_id.inbound_payment_method_ids or self.journal_id.outbound_payment_method_ids
self.payment_method_id = payment_methods and payment_methods[0] or False
# Set payment method domain (restrict to methods enabled for the journal and to selected payment type)
payment_type = self.payment_type in ('outbound', 'transfer') and 'outbound' or 'inbound'
return {'domain': {'payment_method_id': [('payment_type', '=', payment_type), ('id', 'in', payment_methods.ids)]}}
return {}
@api.model
def _compute_total_invoices_amount(self):
""" Compute the sum of the residual of invoices, expressed in the payment currency """
payment_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id or self.env.user.company_id.currency_id
total = 0
for inv in self.invoice_ids:
if inv.currency_id == payment_currency:
total += inv.residual_signed
else:
total += inv.company_currency_id.with_context(date=self.payment_date).compute(
inv.residual_company_signed, payment_currency)
return abs(total)
class account_register_payments(models.TransientModel):
_name = "account.register.payments"
_inherit = 'account.abstract.payment'
_description = "Register payments on multiple invoices"
invoice_ids = fields.Many2many('account.invoice', string='Invoices', copy=False)
multi = fields.Boolean(string='Multi', help='Technical field indicating if the user selected invoices from multiple partners or from different types.')
@api.onchange('payment_type')
def _onchange_payment_type(self):
if self.payment_type:
return {'domain': {'payment_method_id': [('payment_type', '=', self.payment_type)]}}
@api.model
def _compute_payment_amount(self, invoice_ids):
payment_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
total = 0
for inv in invoice_ids:
if inv.currency_id == payment_currency:
total += MAP_INVOICE_TYPE_PAYMENT_SIGN[inv.type] * inv.residual_company_signed
else:
amount_residual = inv.company_currency_id.with_context(date=self.payment_date).compute(
inv.residual_company_signed, payment_currency)
total += MAP_INVOICE_TYPE_PAYMENT_SIGN[inv.type] * amount_residual
return total
@api.model
def default_get(self, fields):
rec = super(account_register_payments, self).default_get(fields)
active_ids = self._context.get('active_ids')
# Check for selected invoices ids
if not active_ids:
raise UserError(_("Programmation error: wizard action executed without active_ids in context."))
invoices = self.env['account.invoice'].browse(active_ids)
# Check all invoices are open
if any(invoice.state != 'open' for invoice in invoices):
raise UserError(_("You can only register payments for open invoices"))
# Check all invoices have the same currency
if any(inv.currency_id != invoices[0].currency_id for inv in invoices):
raise UserError(_("In order to pay multiple invoices at once, they must use the same currency."))
# Check whether we are mixing multiple commercial partners or customer invoices with vendor bills
multi = any(inv.commercial_partner_id != invoices[0].commercial_partner_id
or MAP_INVOICE_TYPE_PARTNER_TYPE[inv.type] != MAP_INVOICE_TYPE_PARTNER_TYPE[invoices[0].type]
for inv in invoices)
total_amount = self._compute_payment_amount(invoices)
rec.update({
'amount': abs(total_amount),
'currency_id': invoices[0].currency_id.id,
'payment_type': total_amount > 0 and 'inbound' or 'outbound',
'partner_id': False if multi else invoices[0].commercial_partner_id.id,
'partner_type': False if multi else MAP_INVOICE_TYPE_PARTNER_TYPE[invoices[0].type],
'communication': ' '.join([ref for ref in invoices.mapped('reference') if ref]),
'invoice_ids': [(6, 0, invoices.ids)],
'multi': multi,
})
return rec
@api.multi
def _groupby_invoices(self):
'''Split the invoices linked to the wizard according to their commercial partner and their type.
:return: a dictionary mapping (commercial_partner_id, type) => invoices recordset.
'''
results = {}
# Create a dict dispatching invoices according to their commercial_partner_id and type
for inv in self.invoice_ids:
key = (inv.commercial_partner_id.id, MAP_INVOICE_TYPE_PARTNER_TYPE[inv.type])
if key not in results:
results[key] = self.env['account.invoice']
results[key] += inv
return results
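# Illustrative note (added for clarity; values are made up): the returned mapping
# has the shape {(commercial_partner_id, partner_type): invoices recordset}, e.g.
#   {(7, 'customer'): invoices_a, (12, 'supplier'): invoices_b}
# Each group later becomes exactly one payment in get_payments_vals().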
@api.multi
def _prepare_payment_vals(self, invoices):
'''Create the payment values.
:param invoices: The invoices that should have the same commercial partner and the same type.
:return: The payment values as a dictionary.
'''
amount = self._compute_payment_amount(invoices) if self.multi else self.amount
payment_type = ('inbound' if amount > 0 else 'outbound') if self.multi else self.payment_type
return {
'journal_id': self.journal_id.id,
'payment_method_id': self.payment_method_id.id,
'payment_date': self.payment_date,
'communication': self.communication,
'invoice_ids': [(6, 0, invoices.ids)],
'payment_type': payment_type,
'amount': abs(amount),
'currency_id': self.currency_id.id,
'partner_id': invoices[0].commercial_partner_id.id,
'partner_type': MAP_INVOICE_TYPE_PARTNER_TYPE[invoices[0].type],
}
@api.multi
def get_payments_vals(self):
'''Compute the values for payments.
:return: a list of payment values (dictionary).
'''
if self.multi:
groups = self._groupby_invoices()
return [self._prepare_payment_vals(invoices) for invoices in groups.values()]
return [self._prepare_payment_vals(self.invoice_ids)]
@api.multi
def create_payments(self):
'''Create payments according to the invoices.
Having invoices with different commercial_partner_id or different type (Vendor bills with customer invoices)
leads to multiple payments.
If all the invoices are related to the same commercial_partner_id and have the same type,
only one payment will be created.
:return: The ir.actions.act_window to show created payments.
'''
Payment = self.env['account.payment']
payments = Payment
for payment_vals in self.get_payments_vals():
payments += Payment.create(payment_vals)
payments.post()
return {
'name': _('Payments'),
'domain': [('id', 'in', payments.ids), ('state', '=', 'posted')],
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.payment',
'view_id': False,
'type': 'ir.actions.act_window',
}
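# Hypothetical usage sketch (e.g. from an Odoo shell; record names are made up).
# The wizard reads the selected invoices from active_ids, as enforced by default_get():
#   wizard = env['account.register.payments'].with_context(
#       active_ids=invoices.ids).create({
#           'journal_id': bank_journal.id,
#           'payment_method_id': manual_in_method.id,
#       })
#   action = wizard.create_payments()  # posts the payments and returns an act_window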
class account_payment(models.Model):
_name = "account.payment"
_inherit = ['mail.thread', 'account.abstract.payment']
_description = "Payments"
_order = "payment_date desc, name desc"
@api.one
@api.depends('invoice_ids')
def _get_has_invoices(self):
self.has_invoices = bool(self.invoice_ids)
@api.multi
@api.depends('move_line_ids.reconciled')
def _get_move_reconciled(self):
for payment in self:
rec = True
for aml in payment.move_line_ids.filtered(lambda x: x.account_id.reconcile):
if not aml.reconciled:
rec = False
payment.move_reconciled = rec
@api.one
@api.depends('invoice_ids', 'amount', 'payment_date', 'currency_id')
def _compute_payment_difference(self):
if len(self.invoice_ids) == 0:
return
if self.invoice_ids[0].type in ['in_invoice', 'out_refund']:
self.payment_difference = self.amount - self._compute_total_invoices_amount()
else:
self.payment_difference = self._compute_total_invoices_amount() - self.amount
company_id = fields.Many2one(store=True)
name = fields.Char(readonly=True, copy=False, default="Draft Payment") # The name is assigned when post() is called
state = fields.Selection([('draft', 'Draft'), ('posted', 'Posted'), ('sent', 'Sent'), ('reconciled', 'Reconciled'), ('cancelled', 'Cancelled')], readonly=True, default='draft', copy=False, string="Status")
payment_type = fields.Selection(selection_add=[('transfer', 'Internal Transfer')])
payment_reference = fields.Char(copy=False, readonly=True, help="Reference of the document used to issue this payment. Eg. check number, file name, etc.")
move_name = fields.Char(string='Journal Entry Name', readonly=True,
default=False, copy=False,
help="Technical field holding the number given to the journal entry, automatically set when the statement line is reconciled then stored to set the same number again if the line is cancelled, set to draft and re-processed again.")
# Money flows from the journal_id's default_debit_account_id or default_credit_account_id to the destination_account_id
destination_account_id = fields.Many2one('account.account', compute='_compute_destination_account_id', readonly=True)
# For money transfer, money goes from journal_id to a transfer account, then from the transfer account to destination_journal_id
destination_journal_id = fields.Many2one('account.journal', string='Transfer To', domain=[('type', 'in', ('bank', 'cash'))])
invoice_ids = fields.Many2many('account.invoice', 'account_invoice_payment_rel', 'payment_id', 'invoice_id', string="Invoices", copy=False, readonly=True)
has_invoices = fields.Boolean(compute="_get_has_invoices", help="Technical field used for usability purposes")
payment_difference = fields.Monetary(compute='_compute_payment_difference', readonly=True)
payment_difference_handling = fields.Selection([('open', 'Keep open'), ('reconcile', 'Mark invoice as fully paid')], default='open', string="Payment Difference", copy=False)
writeoff_account_id = fields.Many2one('account.account', string="Difference Account", domain=[('deprecated', '=', False)], copy=False)
writeoff_label = fields.Char(
string='Journal Item Label',
help='Change label of the counterpart that will hold the payment difference',
default='Write-Off')
# FIXME: ondelete='restrict' not working (eg. cancel a bank statement reconciliation with a payment)
move_line_ids = fields.One2many('account.move.line', 'payment_id', readonly=True, copy=False, ondelete='restrict')
move_reconciled = fields.Boolean(compute="_get_move_reconciled", readonly=True)
def open_payment_matching_screen(self):
# Open reconciliation view for customers/suppliers
move_line_id = False
for move_line in self.move_line_ids:
if move_line.account_id.reconcile:
move_line_id = move_line.id
break
action_context = {'company_ids': [self.company_id.id], 'partner_ids': [self.partner_id.commercial_partner_id.id]}
if self.partner_type == 'customer':
action_context.update({'mode': 'customers'})
elif self.partner_type == 'supplier':
action_context.update({'mode': 'suppliers'})
if move_line_id:
action_context.update({'move_line_id': move_line_id})
return {
'type': 'ir.actions.client',
'tag': 'manual_reconciliation_view',
'context': action_context,
}
@api.onchange('amount', 'currency_id')
def _onchange_amount(self):
journal_type = ['bank', 'cash']
domain = []
if self.currency_id.is_zero(self.amount):
# In case of payment with 0 amount, allow to select a journal of type 'general' like
# 'Miscellaneous Operations' and set this journal by default.
journal_type.append('general')
self.payment_difference_handling = 'reconcile'
self.journal_id = self.env['account.journal'].search([('type', '=', 'general')], limit=1)
else:
if self.payment_type == 'inbound':
domain.append(('at_least_one_inbound', '=', True))
else:
domain.append(('at_least_one_outbound', '=', True))
domain.append(('type', 'in', journal_type))
return {'domain': {'journal_id': domain}}
@api.one
@api.depends('invoice_ids', 'payment_type', 'partner_type', 'partner_id')
def _compute_destination_account_id(self):
if self.invoice_ids:
self.destination_account_id = self.invoice_ids[0].account_id.id
elif self.payment_type == 'transfer':
if not self.company_id.transfer_account_id.id:
raise UserError(_('Transfer account not defined on the company.'))
self.destination_account_id = self.company_id.transfer_account_id.id
elif self.partner_id:
if self.partner_type == 'customer':
self.destination_account_id = self.partner_id.property_account_receivable_id.id
else:
self.destination_account_id = self.partner_id.property_account_payable_id.id
@api.onchange('partner_type')
def _onchange_partner_type(self):
# Set partner_id domain
if self.partner_type:
return {'domain': {'partner_id': [(self.partner_type, '=', True)]}}
@api.onchange('payment_type')
def _onchange_payment_type(self):
if not self.invoice_ids:
# Set default partner type for the payment type
if self.payment_type == 'inbound':
self.partner_type = 'customer'
elif self.payment_type == 'outbound':
self.partner_type = 'supplier'
# Set payment method domain
res = self._onchange_journal()
if not res.get('domain', {}):
res['domain'] = {}
res['domain']['journal_id'] = self.payment_type == 'inbound' and [('at_least_one_inbound', '=', True)] or [('at_least_one_outbound', '=', True)]
res['domain']['journal_id'].append(('type', 'in', ('bank', 'cash')))
return res
@api.model
def default_get(self, fields):
rec = super(account_payment, self).default_get(fields)
invoice_defaults = self.resolve_2many_commands('invoice_ids', rec.get('invoice_ids'))
if invoice_defaults and len(invoice_defaults) == 1:
invoice = invoice_defaults[0]
rec['communication'] = invoice['reference'] or invoice['name'] or invoice['number']
rec['currency_id'] = invoice['currency_id'][0]
rec['payment_type'] = invoice['type'] in ('out_invoice', 'in_refund') and 'inbound' or 'outbound'
rec['partner_type'] = MAP_INVOICE_TYPE_PARTNER_TYPE[invoice['type']]
rec['partner_id'] = invoice['partner_id'][0]
rec['amount'] = invoice['residual']
return rec
@api.multi
def button_journal_entries(self):
return {
'name': _('Journal Items'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move.line',
'view_id': False,
'type': 'ir.actions.act_window',
'domain': [('payment_id', 'in', self.ids)],
}
@api.multi
def button_invoices(self):
return {
'name': _('Paid Invoices'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.invoice',
'view_id': False,
'type': 'ir.actions.act_window',
'domain': [('id', 'in', [x.id for x in self.invoice_ids])],
}
@api.multi
def button_dummy(self):
return True
@api.multi
def unreconcile(self):
""" Set back the payments in 'posted' or 'sent' state, without deleting the journal entries.
Called when cancelling a bank statement line linked to a pre-registered payment.
"""
for payment in self:
if payment.payment_reference:
payment.write({'state': 'sent'})
else:
payment.write({'state': 'posted'})
@api.multi
def cancel(self):
for rec in self:
for move in rec.move_line_ids.mapped('move_id'):
if rec.invoice_ids:
move.line_ids.remove_move_reconcile()
move.button_cancel()
move.unlink()
rec.state = 'cancelled'
@api.multi
def unlink(self):
if any(bool(rec.move_line_ids) for rec in self):
raise UserError(_("You can not delete a payment that is already posted"))
if any(rec.move_name for rec in self):
raise UserError(_('It is not allowed to delete a payment that already created a journal entry since it would create a gap in the numbering. You should create the journal entry again and cancel it through a regular reversal.'))
return super(account_payment, self).unlink()
@api.multi
def post(self):
""" Create the journal items for the payment and update the payment's state to 'posted'.
A journal entry is created containing an item in the source liquidity account (selected journal's default_debit or default_credit)
and another in the destination reconciliable account (see _compute_destination_account_id).
If invoice_ids is not empty, there will be one reconciliable move line per invoice to reconcile with.
If the payment is a transfer, a second journal entry is created in the destination journal to receive money from the transfer account.
"""
for rec in self:
if rec.state != 'draft':
raise UserError(_("Only a draft payment can be posted. Trying to post a payment in state %s.") % rec.state)
if any(inv.state != 'open' for inv in rec.invoice_ids):
raise ValidationError(_("The payment cannot be processed because the invoice is not open!"))
# Use the right sequence to set the name
if rec.payment_type == 'transfer':
sequence_code = 'account.payment.transfer'
else:
if rec.partner_type == 'customer':
if rec.payment_type == 'inbound':
sequence_code = 'account.payment.customer.invoice'
if rec.payment_type == 'outbound':
sequence_code = 'account.payment.customer.refund'
if rec.partner_type == 'supplier':
if rec.payment_type == 'inbound':
sequence_code = 'account.payment.supplier.refund'
if rec.payment_type == 'outbound':
sequence_code = 'account.payment.supplier.invoice'
rec.name = self.env['ir.sequence'].with_context(ir_sequence_date=rec.payment_date).next_by_code(sequence_code)
if not rec.name and self.payment_type != 'transfer':
raise UserError(_("You have to define a sequence for %s in your company.") % (sequence_code,))
# Create the journal entry
amount = rec.amount * (rec.payment_type in ('outbound', 'transfer') and 1 or -1)
move = rec._create_payment_entry(amount)
# In case of a transfer, the first journal entry created debited the source liquidity account and credited
# the transfer account. Now we debit the transfer account and credit the destination liquidity account.
if rec.payment_type == 'transfer':
transfer_credit_aml = move.line_ids.filtered(lambda r: r.account_id == rec.company_id.transfer_account_id)
transfer_debit_aml = rec._create_transfer_entry(amount)
(transfer_credit_aml + transfer_debit_aml).reconcile()
rec.write({'state': 'posted', 'move_name': move.name})
@api.multi
def action_draft(self):
return self.write({'state': 'draft'})
def action_validate_invoice_payment(self):
""" Posts a payment used to pay an invoice. This function only posts the
payment by default but can be overridden to apply specific post or pre-processing.
It is called by the "validate" button of the popup window
triggered on invoice form by the "Register Payment" button.
"""
if any(len(record.invoice_ids) != 1 for record in self):
# For multiple invoices, there is account.register.payments wizard
raise UserError(_("This method should only be called to process a single invoice's payment."))
self.post()
def _create_payment_entry(self, amount):
""" Create a journal entry corresponding to a payment, if the payment references invoice(s) they are reconciled.
Return the journal entry.
"""
aml_obj = self.env['account.move.line'].with_context(check_move_validity=False)
invoice_currency = False
if self.invoice_ids and all([x.currency_id == self.invoice_ids[0].currency_id for x in self.invoice_ids]):
# if all the invoices selected share the same currency, record the payment in that currency too
invoice_currency = self.invoice_ids[0].currency_id
debit, credit, amount_currency, currency_id = aml_obj.with_context(date=self.payment_date).compute_amount_fields(amount, self.currency_id, self.company_id.currency_id, invoice_currency)
move = self.env['account.move'].create(self._get_move_vals())
#Write line corresponding to invoice payment
counterpart_aml_dict = self._get_shared_move_line_vals(debit, credit, amount_currency, move.id, False)
counterpart_aml_dict.update(self._get_counterpart_move_line_vals(self.invoice_ids))
counterpart_aml_dict.update({'currency_id': currency_id})
counterpart_aml = aml_obj.create(counterpart_aml_dict)
#Reconcile with the invoices
if self.payment_difference_handling == 'reconcile' and self.payment_difference:
writeoff_line = self._get_shared_move_line_vals(0, 0, 0, move.id, False)
amount_currency_wo, currency_id = aml_obj.with_context(date=self.payment_date).compute_amount_fields(self.payment_difference, self.currency_id, self.company_id.currency_id, invoice_currency)[2:]
# the writeoff debit and credit must be computed from the invoice residual in company currency
# minus the payment amount in company currency, and not from the payment difference in the payment currency
# to avoid loss of precision during the currency rate computations. See revision 20935462a0cabeb45480ce70114ff2f4e91eaf79 for a detailed example.
total_residual_company_signed = sum(invoice.residual_company_signed for invoice in self.invoice_ids)
total_payment_company_signed = self.currency_id.with_context(date=self.payment_date).compute(self.amount, self.company_id.currency_id)
if self.invoice_ids[0].type in ['in_invoice', 'out_refund']:
amount_wo = total_payment_company_signed - total_residual_company_signed
else:
amount_wo = total_residual_company_signed - total_payment_company_signed
# Align the sign of the secondary currency writeoff amount with the sign of the writeoff
# amount in the company currency
if amount_wo > 0:
debit_wo = amount_wo
credit_wo = 0.0
amount_currency_wo = abs(amount_currency_wo)
else:
debit_wo = 0.0
credit_wo = -amount_wo
amount_currency_wo = -abs(amount_currency_wo)
writeoff_line['name'] = self.writeoff_label
writeoff_line['account_id'] = self.writeoff_account_id.id
writeoff_line['debit'] = debit_wo
writeoff_line['credit'] = credit_wo
writeoff_line['amount_currency'] = amount_currency_wo
writeoff_line['currency_id'] = currency_id
writeoff_line = aml_obj.create(writeoff_line)
if counterpart_aml['debit'] or writeoff_line['credit']:
counterpart_aml['debit'] += credit_wo - debit_wo
if counterpart_aml['credit'] or writeoff_line['debit']:
counterpart_aml['credit'] += debit_wo - credit_wo
counterpart_aml['amount_currency'] -= amount_currency_wo
#Write counterpart lines
if not self.currency_id.is_zero(self.amount):
if self.currency_id == self.company_id.currency_id:
amount_currency = 0
liquidity_aml_dict = self._get_shared_move_line_vals(credit, debit, -amount_currency, move.id, False)
liquidity_aml_dict.update(self._get_liquidity_move_line_vals(-amount))
aml_obj.create(liquidity_aml_dict)
#validate the payment
move.post()
#reconcile the invoice receivable/payable line(s) with the payment
self.invoice_ids.register_payment(counterpart_aml)
return move
def _create_transfer_entry(self, amount):
""" Create the journal entry corresponding to the 'incoming money' part of an internal transfer, return the reconciliable move line
"""
aml_obj = self.env['account.move.line'].with_context(check_move_validity=False)
debit, credit, amount_currency, dummy = aml_obj.with_context(date=self.payment_date).compute_amount_fields(amount, self.currency_id, self.company_id.currency_id)
amount_currency = self.destination_journal_id.currency_id and self.currency_id.with_context(date=self.payment_date).compute(amount, self.destination_journal_id.currency_id) or 0
dst_move = self.env['account.move'].create(self._get_move_vals(self.destination_journal_id))
dst_liquidity_aml_dict = self._get_shared_move_line_vals(debit, credit, amount_currency, dst_move.id)
dst_liquidity_aml_dict.update({
'name': _('Transfer from %s') % self.journal_id.name,
'account_id': self.destination_journal_id.default_credit_account_id.id,
'currency_id': self.destination_journal_id.currency_id.id,
'payment_id': self.id,
'journal_id': self.destination_journal_id.id})
aml_obj.create(dst_liquidity_aml_dict)
transfer_debit_aml_dict = self._get_shared_move_line_vals(credit, debit, 0, dst_move.id)
transfer_debit_aml_dict.update({
'name': self.name,
'payment_id': self.id,
'account_id': self.company_id.transfer_account_id.id,
'journal_id': self.destination_journal_id.id})
if self.currency_id != self.company_id.currency_id:
transfer_debit_aml_dict.update({
'currency_id': self.currency_id.id,
'amount_currency': -self.amount,
})
transfer_debit_aml = aml_obj.create(transfer_debit_aml_dict)
dst_move.post()
return transfer_debit_aml
def _get_move_vals(self, journal=None):
""" Return dict to create the payment move
"""
journal = journal or self.journal_id
if not journal.sequence_id:
raise UserError(_('Configuration Error! The journal %s does not have a sequence, please specify one.') % journal.name)
if not journal.sequence_id.active:
raise UserError(_('Configuration Error! The sequence of journal %s is deactivated.') % journal.name)
name = self.move_name or journal.with_context(ir_sequence_date=self.payment_date).sequence_id.next_by_id()
return {
'name': name,
'date': self.payment_date,
'ref': self.communication or '',
'company_id': self.company_id.id,
'journal_id': journal.id,
}
def _get_shared_move_line_vals(self, debit, credit, amount_currency, move_id, invoice_id=False):
""" Returns values common to both move lines (except for debit, credit and amount_currency which are reversed)
"""
return {
'partner_id': self.payment_type in ('inbound', 'outbound') and self.env['res.partner']._find_accounting_partner(self.partner_id).id or False,
'invoice_id': invoice_id and invoice_id.id or False,
'move_id': move_id,
'debit': debit,
'credit': credit,
'amount_currency': amount_currency or False,
}
def _get_counterpart_move_line_vals(self, invoice=False):
if self.payment_type == 'transfer':
name = self.name
else:
name = ''
if self.partner_type == 'customer':
if self.payment_type == 'inbound':
name += _("Customer Payment")
elif self.payment_type == 'outbound':
name += _("Customer Credit Note")
elif self.partner_type == 'supplier':
if self.payment_type == 'inbound':
name += _("Vendor Credit Note")
elif self.payment_type == 'outbound':
name += _("Vendor Payment")
if invoice:
name += ': '
for inv in invoice:
if inv.move_id:
name += inv.number + ', '
name = name[:len(name)-2]
return {
'name': name,
'account_id': self.destination_account_id.id,
'journal_id': self.journal_id.id,
'currency_id': self.currency_id != self.company_id.currency_id and self.currency_id.id or False,
'payment_id': self.id,
}
def _get_liquidity_move_line_vals(self, amount):
name = self.name
if self.payment_type == 'transfer':
name = _('Transfer to %s') % self.destination_journal_id.name
vals = {
'name': name,
'account_id': self.payment_type in ('outbound','transfer') and self.journal_id.default_debit_account_id.id or self.journal_id.default_credit_account_id.id,
'payment_id': self.id,
'journal_id': self.journal_id.id,
'currency_id': self.currency_id != self.company_id.currency_id and self.currency_id.id or False,
}
# If the journal has a currency specified, the journal item need to be expressed in this currency
if self.journal_id.currency_id and self.currency_id != self.journal_id.currency_id:
amount = self.currency_id.with_context(date=self.payment_date).compute(amount, self.journal_id.currency_id)
debit, credit, amount_currency, dummy = self.env['account.move.line'].with_context(date=self.payment_date).compute_amount_fields(amount, self.journal_id.currency_id, self.company_id.currency_id)
vals.update({
'amount_currency': amount_currency,
'currency_id': self.journal_id.currency_id.id,
})
return vals
| richard-willowit/odoo | addons/account/models/account_payment.py | Python | gpl-3.0 | 35,920 | 0.004928 |
"""
JSONField automatically serializes most Python terms to JSON data.
Creates a TEXT field with a default value of "{}". See test_json.py for
more information.
from django.db import models
from django_extensions.db.fields import json
class LOL(models.Model):
extra = json.JSONField()
"""
from __future__ import absolute_import
from decimal import Decimal
import six
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
try:
# Django >= 1.7
import json
except ImportError:
# Django <= 1.6 backwards compatibility
from django.utils import simplejson as json
def dumps(value):
return DjangoJSONEncoder().encode(value)
def loads(txt):
value = json.loads(
txt,
parse_float=Decimal,
encoding=settings.DEFAULT_CHARSET
)
return value
class JSONDict(dict):
"""
Hack so repr() called by dumpdata will output JSON instead of
Python formatted data. This way fixtures will work!
"""
def __repr__(self):
return dumps(self)
class JSONUnicode(six.text_type):
"""
As above
"""
def __repr__(self):
return dumps(self)
class JSONList(list):
"""
As above
"""
def __repr__(self):
return dumps(self)
class JSONField(six.with_metaclass(models.SubfieldBase, models.TextField)):
"""JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly. Main thingy must be a dict object."""
def __init__(self, *args, **kwargs):
default = kwargs.get('default', None)
if default is None:
kwargs['default'] = '{}'
elif isinstance(default, (list, dict)):
kwargs['default'] = dumps(default)
models.TextField.__init__(self, *args, **kwargs)
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if value is None or value == '':
return {}
elif isinstance(value, six.string_types):
res = loads(value)
if isinstance(res, dict):
return JSONDict(**res)
elif isinstance(res, six.string_types):
return JSONUnicode(res)
elif isinstance(res, list):
return JSONList(res)
return res
else:
return value
def get_db_prep_save(self, value, connection, **kwargs):
"""Convert our JSON object to a string before we save"""
if value is None and self.null:
return None
# default values come in as strings; only non-strings should be
# run through `dumps`
if not isinstance(value, six.string_types):
value = dumps(value)
return super(JSONField, self).get_db_prep_save(value, connection=connection, **kwargs)
def south_field_triple(self):
"""Returns a suitable description of this field for South."""
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.TextField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(JSONField, self).deconstruct()
if self.default == '{}':
del kwargs['default']
return name, path, args, kwargs
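# Hypothetical usage sketch (assumes the LOL model from the module docstring):
#   obj = LOL.objects.create(extra={'tags': ['a', 'b'], 'count': 3})
#   obj = LOL.objects.get(pk=obj.pk)
#   obj.extra['count']   # -> 3; the dict round-trips through dumps()/loads()
#   repr(obj.extra)      # -> JSON text, which keeps dumpdata fixtures loadable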
| pabulumm/neighbors | lib/python3.4/site-packages/django_extensions/db/fields/json.py | Python | bsd-3-clause | 3,459 | 0.000289 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""setup
(C) Franck Barbenoire <fbarbenoire@yahoo.fr>
License : GPL v3"""
from distutils.core import setup
from setuptools import find_packages
setup(name = "django-openzoom",
version = "0.1.1",
description = "Django application for displaying very high resolution images",
author = "Franck Barbenoire",
author_email = "fbarbenoire@yahoo.fr",
url = "https://github.com/franckinux/django-openzoom",
packages = find_packages(),
include_package_data = True,
zip_safe = False,
classifiers = ['Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Framework :: Django',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content']
)
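# Illustrative note (standard distutils/setuptools workflow, not project-specific):
#   python setup.py sdist   # build a source distribution
#   pip install .           # or install django-openzoom from this checkout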
| franckinux/django-openzoom | setup.py | Python | gpl-3.0 | 925 | 0.035676 |
from __future__ import print_function
def validate_book(body):
'''
This does not only accept/refuse a book. It also returns an ENHANCED
version of body, with (mostly fts-related) additional fields.
This function is idempotent.
'''
if '_language' not in body:
raise ValueError('language needed')
if len(body['_language']) > 2:
raise ValueError('invalid language: %s' % body['_language'])
allfields = collectStrings(body)
body['_text_%s' % body['_language']] = ' '.join(allfields)
return body
def collectStrings(leftovers):
strings = []
if isinstance(leftovers, basestring):
return leftovers.split()
elif isinstance(leftovers, list):
for l in leftovers:
strings.extend(collectStrings(l))
return strings
elif isinstance(leftovers, dict):
for key, value in leftovers.items():
if not key.startswith('_'):
strings.extend(collectStrings(value))
return strings
else:
return strings
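# Illustrative example (made-up record): collectStrings walks nested dicts/lists,
# skips keys starting with '_', and returns the individual words (order may vary):
#   collectStrings({'title': 'Moby Dick', '_id': 'x9', 'tags': ['novel', 'sea']})
#   # -> ['Moby', 'Dick', 'novel', 'sea']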
class DB(object):
'''
this class contains every query method and every operation on the index
'''
# Setup {{{2
def __init__(self, es, index_name):
self.es = es
self.index_name = index_name
# book_validator can adjust the book, and raise if it's not valid
self.book_validator = validate_book
def setup_db(self):
maps = {
'book': { # this need to be the document type!
# special elasticsearch field
# http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-timestamp-field.html
# initialized with element creation date, hidden by default in query result
"_timestamp" : { "enabled" : "true",
"store": "yes"},
"properties": {
"_text_en": {
"type": "string",
"analyzer": "english"
},
"_text_it": {
"type": "string",
"analyzer": "it_analyzer"
}
}
}
}
# Just like the default one
# http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/analysis-lang-analyzer.html#italian-analyzer
# but the stemmer changed from light_italian to italian
settings = {"analysis": {
"filter": {
"italian_elision": {
"type": "elision",
"articles": [
"c", "l", "all", "dall", "dell",
"nell", "sull", "coll", "pell",
"gl", "agl", "dagl", "degl", "negl",
"sugl", "un", "m", "t", "s", "v", "d"
]
},
"italian_stop": {
"type": "stop", "stopwords": "_italian_"},
"italian_stemmer": {
"type": "stemmer", "language": "italian"}
},
"analyzer": {
"it_analyzer": {
"type": "custom",
"tokenizer": "standard",
"filter": [
"italian_elision",
"lowercase",
"italian_stop",
"italian_stemmer"
]
}
}
}}
if not self.es.indices.exists(self.index_name):
self.es.indices.create(index=self.index_name,
body={'settings': settings,
'mappings': maps})
# End setup }}}
# Queries {{{2
def __len__(self):
stats = self.es.indices.stats()
return stats['indices'][self.index_name]['total']['docs']['count']
def _search(self, body, size=30):
return self.es.search(index=self.index_name, body=body, size=size)
def _get_search_field(self, field, value):
return {'query':
{'match': {field: value}}
}
def mlt(self, _id):
'''
High-level method to do "more like this".
Its exact implementation can vary.
'''
query = {'more_like_this': {
# FIXME: text_* does not seem to work, so we're relying on listing
# them manually
'fields': ['book._text_it', 'book._text_en'],
'ids': [_id],
'min_term_freq': 1,
'min_doc_freq': 1,
}}
return self._search(dict(query=query))
def get_all_books(self, size=30):
return self._search({}, size=size)
def get_last_inserted(self, size=30):
query = { "fields": [ "_timestamp", "_source"],
"query" : { "match_all" : {} },
"sort" : [ {"_timestamp": "desc"} ] }
return self._search(body=query, size=size)
def get_books_simplequery(self, query):
return self._search(self._get_search_field('_all', query))
def get_books_multilanguage(self, query):
return self._search({'query': {'multi_match':
{'query': query, 'fields': '_text_*'}
}})
def get_books_by_title(self, title):
return self._search(self._get_search_field('title', title))
def get_books_by_actor(self, authorname):
return self._search(self._get_search_field('actors', authorname))
def get_book_by_id(self, id):
return self.es.get(index=self.index_name, id=id)
def get_books_querystring(self, query):
q = {'query': query, 'fields': ['_text_*']}
return self._search({'query': dict(query_string=q)})
def user_search(self, query):
'''
This acts like a "wrapper" that always point to the recommended
function for user searching.
'''
return self.get_books_querystring(query)
def autocomplete(self, fieldname, start):
raise NotImplementedError()
# End queries }}}
# Operations {{{2
def add_book(self, **book):
'''
Call it like this:
db.add_book(doc_type='book',
body={'title': 'foobar', '_language': 'it'})
'''
if 'doc_type' not in book:
book['doc_type'] = 'book'
book['body'] = validate_book(book['body'])
return self.es.create(index=self.index_name, **book)
def update_book(self, id, doc_type='book', body={}):
'''
Update a book. The "body" is merged with the current one.
Yes, it is NOT overwritten.
'''
# note that we are NOT overwriting all the _source, just merging
doc = {'doc': body}
ret = self.es.update(index=self.index_name, id=id,
doc_type=doc_type, body=doc)
# text_* fields need to be "updated"; atomicity is provided by the
# idempotency of validate_book
book = self.get_book_by_id(ret['_id'])['_source']
book = validate_book(book)
ret = self.es.update(index=self.index_name, id=id,
doc_type=doc_type, body={'doc': book})
return ret
def increment_download_count(self, id, fileIndex, doc_type='book'):
'''
Increment the download counter of a specific file
'''
body = self.es.get(index=self.index_name, id=id, doc_type='book', _source_include='_files')['_source']
body['_files'][fileIndex]['download_count'] += 1
self.es.update(index=self.index_name, id=id,
doc_type=doc_type, body={"doc":body})
# End operations }}}
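# Hypothetical usage sketch (assumes a reachable Elasticsearch instance; the index
# name is made up):
#   from elasticsearch import Elasticsearch
#   db = DB(Elasticsearch(), 'libreant_test')
#   db.setup_db()
#   db.add_book(body={'title': 'Moby Dick', 'actors': ['Melville'], '_language': 'en'})
#   hits = db.user_search('moby')  # wraps get_books_querystring()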
# vim: set fdm=marker fdl=1:
| boyska/libreant | libreantdb/api.py | Python | agpl-3.0 | 7,780 | 0.002314 |
# -*- coding: UTF-8 -*-
##############################################################################
#
# OERPLib
# Copyright (C) 2012-2013 Sébastien Alix.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import xmlrpclib
import httplib
import socket
import sys
from urlparse import urlparse
# Defined later following the version of Python used
TimeoutTransport = None
TimeoutSafeTransport = None
class TimeoutServerProxy(xmlrpclib.ServerProxy):
"""xmlrpclib.ServerProxy overload to manage the timeout of the socket."""
def __init__(self, *args, **kwargs):
url = args[0]
https_ok = urlparse(url).scheme == 'https'
t = https_ok and TimeoutSafeTransport() or TimeoutTransport()
t.timeout = kwargs.get('timeout', 120)
if 'timeout' in kwargs:
del kwargs['timeout']
kwargs['transport'] = t
xmlrpclib.ServerProxy.__init__(self, *args, **kwargs)
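# Hypothetical usage sketch (URL, database and credentials are made up): open an
# XML-RPC connection whose socket times out after 30 seconds instead of 120:
#   common = TimeoutServerProxy('http://localhost:8069/xmlrpc/common', timeout=30)
#   # common.login('db_name', 'admin', 'admin')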
if sys.version_info <= (2, 7):
# Python 2.5 and 2.6
# -- xmlrpclib.Transport with timeout support --
class TimeoutHTTPPy26(httplib.HTTP):
def __init__(self, host='', port=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
if port == 0:
port = None
self._setup(self._connection_class(host, port, strict, timeout))
class TimeoutTransportPy26(xmlrpclib.Transport):
def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
*args, **kwargs):
xmlrpclib.Transport.__init__(self, *args, **kwargs)
self.timeout = timeout
def make_connection(self, host):
host, extra_headers, x509 = self.get_host_info(host)
conn = TimeoutHTTPPy26(host, timeout=self.timeout)
return conn
# -- xmlrpclib.SafeTransport with timeout support --
class TimeoutHTTPSPy26(httplib.HTTPS):
def __init__(self, host='', port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
if port == 0:
port = None
self._setup(self._connection_class(
host, port, key_file, cert_file, strict, timeout))
self.key_file = key_file
self.cert_file = cert_file
class TimeoutSafeTransportPy26(xmlrpclib.SafeTransport):
def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
*args, **kwargs):
xmlrpclib.Transport.__init__(self, *args, **kwargs)
self.timeout = timeout
def make_connection(self, host):
host, extra_headers, x509 = self.get_host_info(host)
conn = TimeoutHTTPSPy26(host, timeout=self.timeout)
return conn
# Define the TimeTransport and TimeSafeTransport class version to use
TimeoutTransport = TimeoutTransportPy26
TimeoutSafeTransport = TimeoutSafeTransportPy26
else:
# Python 2.7 and 3.X
# -- xmlrpclib.Transport with timeout support --
class TimeoutHTTPConnectionPy27(httplib.HTTPConnection):
def __init__(self, timeout, *args, **kwargs):
httplib.HTTPConnection.__init__(self, *args, **kwargs)
self.timeout = timeout
def connect(self):
httplib.HTTPConnection.connect(self)
self.sock.settimeout(self.timeout)
class TimeoutTransportPy27(xmlrpclib.Transport):
def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
*args, **kwargs):
xmlrpclib.Transport.__init__(self, *args, **kwargs)
self.timeout = timeout
def make_connection(self, host):
if self._connection and host == self._connection[0]:
return self._connection[1]
chost, self._extra_headers, x509 = self.get_host_info(host)
self._connection = host, TimeoutHTTPConnectionPy27(
self.timeout, chost)
return self._connection[1]
# -- xmlrpclib.SafeTransport with timeout support --
class TimeoutHTTPSConnectionPy27(httplib.HTTPSConnection):
def __init__(self, timeout, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
self.timeout = timeout
def connect(self):
httplib.HTTPSConnection.connect(self)
self.sock.settimeout(self.timeout)
class TimeoutSafeTransportPy27(xmlrpclib.SafeTransport):
def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
*args, **kwargs):
xmlrpclib.SafeTransport.__init__(self, *args, **kwargs)
self.timeout = timeout
def make_connection(self, host):
if self._connection and host == self._connection[0]:
return self._connection[1]
chost, self._extra_headers, x509 = self.get_host_info(host)
self._connection = host, TimeoutHTTPSConnectionPy27(
self.timeout, chost)
return self._connection[1]
# Define the TimeTransport and TimeSafeTransport class version to use
TimeoutTransport = TimeoutTransportPy27
TimeoutSafeTransport = TimeoutSafeTransportPy27
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| etsinko/oerplib | oerplib/rpc/xmlrpclib_custom.py | Python | lgpl-3.0 | 5,967 | 0.000168 |
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot, row_norms
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
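# Illustrative example (tiny made-up samples, one feature per column): groups with
# clearly different means yield a large F value and a small p-value.
#   >>> import numpy as np
#   >>> a = np.array([[1.0], [2.0]])
#   >>> b = np.array([[2.0], [3.0]])
#   >>> c = np.array([[8.0], [9.0]])
#   >>> F, prob = f_oneway(a, b, c)  # arrays of shape (1,)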
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
The set of regressors that will be tested sequentially.
y : array of shape(n_samples)
The data matrix.
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See also
--------
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
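# Illustrative example (tiny made-up term-count matrix): features whose counts are
# concentrated in one class get the largest chi-squared statistic.
#   >>> X = np.array([[1, 0, 3], [2, 0, 1], [0, 4, 0], [0, 5, 1]])
#   >>> y = np.array([0, 0, 1, 1])
#   >>> chi2_stats, p_values = chi2(X, y)  # both of shape (3,)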
def f_regression(X, y, center=True):
"""Univariate linear regression tests.
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 2 steps:
1. The cross correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : array of shape(n_samples).
The data matrix
center : True, bool,
If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
"""
if issparse(X) and center:
raise ValueError("center=True only allowed for dense data")
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
if center:
y = y - np.mean(y)
X = X.copy('F') # faster in fortran
X -= X.mean(axis=0)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= row_norms(X.T)
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
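# Illustrative example (synthetic data): only the first column drives the target,
# so it receives the largest F value and the smallest p-value.
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(50, 3)
#   >>> y = X[:, 0] + 0.1 * rng.randn(50)
#   >>> F, pval = f_regression(X, y)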
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'])
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
self.scores_, self.pvalues_ = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
# Cater for NaNs
if self.percentile == 100:
return np.ones(len(self.scores_), dtype=np.bool)
elif self.percentile == 0:
return np.zeros(len(self.scores_), dtype=np.bool)
scores = _clean_nans(self.scores_)
threshold = stats.scoreatpercentile(scores,
100 - self.percentile)
mask = scores > threshold
ties = np.where(scores == threshold)[0]
if len(ties):
max_feats = int(len(scores) * self.percentile / 100)
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features; got %r."
"Use k='all' to return all features."
% self.k)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
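# Hypothetical usage sketch: keep the 2 highest-scoring features of the iris data.
#   >>> from sklearn.datasets import load_iris
#   >>> iris = load_iris()
#   >>> X_new = SelectKBest(f_classif, k=2).fit_transform(iris.data, iris.target)
#   >>> X_new.shape  # (150, 2)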
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information between features and the target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
References
----------
https://en.wikipedia.org/wiki/False_discovery_rate
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
    mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
        selected = sv[sv <= float(self.alpha) / n_features
                      * np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
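# Added worked sketch of the Benjamini-Hochberg rule used above: with sorted
# p-values p_(1) <= ... <= p_(m), keep every feature whose p-value is at most
# the largest p_(i) satisfying p_(i) <= alpha * i / m. The numbers below are
# made up for illustration.
def _example_benjamini_hochberg():  # pragma: no cover - documentation sketch only
    import numpy as np
    pvalues = np.array([0.001, 0.008, 0.039, 0.041, 0.20])
    alpha, m = 0.05, len(pvalues)
    sv = np.sort(pvalues)
    passing = sv[sv <= alpha * np.arange(1, m + 1) / m]
    # Thresholds are [0.01, 0.02, 0.03, 0.04, 0.05]; only 0.001 and 0.008
    # pass, so the cutoff is 0.008 and exactly two features are kept.
    return pvalues <= passing.max()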
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
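# Added illustrative note: the family-wise error rate control above is a
# Bonferroni-style cut-off - a feature survives only if its p-value is below
# alpha divided by the number of features tested. Example values are made up.
def _example_select_fwe_threshold():  # pragma: no cover - documentation sketch only
    import numpy as np
    pvalues = np.array([0.004, 0.02, 0.03])
    alpha = 0.05
    return pvalues < alpha / len(pvalues)  # array([ True, False, False])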
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues). For modes 'percentile' or 'k_best' it can return
        a single array of scores.
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned scores only.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
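# Added illustrative sketch (not part of the original source):
# GenericUnivariateSelect dispatches to one of the selectors above based on
# ``mode``; 'k_best' with param=2 behaves like SelectKBest(k=2).
def _example_generic_univariate_select():  # pragma: no cover - documentation sketch only
    from sklearn.datasets import load_iris
    from sklearn.feature_selection import GenericUnivariateSelect, f_classif
    iris = load_iris()
    transformer = GenericUnivariateSelect(f_classif, mode='k_best', param=2)
    return transformer.fit_transform(iris.data, iris.target).shape  # (150, 2)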
| toastedcornflakes/scikit-learn | sklearn/feature_selection/univariate_selection.py | Python | bsd-3-clause | 25,381 | 0.000394 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import join, isfile
from os import walk
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
def read_file(filename):
with open(filename) as fp:
return fp.read().strip()
def read_requirements(filename):
return [line.strip() for line in read_file(filename).splitlines()
if not line.startswith('#')]
NAME = 'gerapy'
FOLDER = 'gerapy'
DESCRIPTION = 'Distributed Crawler Management Framework Based on Scrapy, Scrapyd, Scrapyd-Client, Scrapyd-API, Django and Vue.js'
URL = 'https://github.com/Gerapy/Gerapy'
EMAIL = 'cqc@cuiqingcai.com'
AUTHOR = 'Germey'
REQUIRES_PYTHON = '>=3.5.0'
VERSION = None
REQUIRED = read_requirements('requirements.txt')
here = os.path.abspath(os.path.dirname(__file__))
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
about = {}
if not VERSION:
with open(os.path.join(here, FOLDER, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
def package_files(directories):
paths = []
for item in directories:
if isfile(item):
paths.append(join('..', item))
continue
for (path, directories, filenames) in walk(item):
for filename in filenames:
paths.append(join('..', path, filename))
return paths
class UploadCommand(Command):
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system(
'{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
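# Usage note (added): once registered via ``cmdclass`` below, publishing is a
# single step, e.g. ``python setup.py upload``, which removes stale builds,
# creates sdist/wheel artifacts, uploads them with Twine, and pushes a
# matching git tag.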
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
entry_points={
'console_scripts': ['gerapy = gerapy.cmd:cmd']
},
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
package_data={
'': package_files([
'gerapy/server/static',
'gerapy/server/core/templates',
'gerapy/templates',
])
},
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| Gerapy/Gerapy | setup.py | Python | mit | 3,683 | 0.000544 |
#
# Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from unittest import mock
import urllib
from oslotest import base
from ceilometer import service
from networking_odl.ceilometer.network.statistics.opendaylight_v2 import driver
from oslo_utils import uuidutils
ADMIN_ID = str(uuidutils.generate_uuid())
PORT_1_TENANT_ID = str(uuidutils.generate_uuid())
PORT_2_TENANT_ID = str(uuidutils.generate_uuid())
PORT_1_ID = str(uuidutils.generate_uuid())
PORT_2_ID = str(uuidutils.generate_uuid())
class _Base(base.BaseTestCase, metaclass=abc.ABCMeta):
@abc.abstractmethod
def switch_data(self):
pass
fake_odl_url = urllib.parse.ParseResult('opendaylight.v2',
'localhost:8080',
'controller/statistics',
None,
None,
None)
fake_params = urllib.parse.parse_qs(
'user=admin&password=admin&scheme=http&auth=basic')
def setUp(self):
super(_Base, self).setUp()
self.addCleanup(mock.patch.stopall)
conf = service.prepare_service([], [])
self.driver = driver.OpenDaylightDriver(conf)
ks_client = mock.Mock(auth_token='fake_token')
ks_client.projects.find.return_value = mock.Mock(name='admin',
id=ADMIN_ID)
self.ks_client = mock.patch('ceilometer.keystone_client.get_client',
return_value=ks_client).start()
self.get_statistics = mock.patch(
'networking_odl.ceilometer.network.statistics.opendaylight_v2.'
'client.SwitchStatisticsAPIClient.get_statistics',
return_value=self.switch_data).start()
def _test_for_meter(self, meter_name, expected_data):
sample_data = self.driver.get_sample_data(meter_name,
self.fake_odl_url,
self.fake_params,
{})
self.assertEqual(expected_data, list(sample_data))
class TestOpenDayLightDriverInvalid(_Base):
switch_data = {"flow_capable_switches": []}
def test_not_implemented_meter(self):
sample_data = self.driver.get_sample_data('egg',
self.fake_odl_url,
self.fake_params,
{})
self.assertIsNone(sample_data)
sample_data = self.driver.get_sample_data('switch.table.egg',
self.fake_odl_url,
self.fake_params,
{})
self.assertIsNone(sample_data)
def test_cache(self):
cache = {}
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
cache)
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
cache)
self.assertEqual(1, self.get_statistics.call_count)
cache = {}
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
cache)
self.assertEqual(2, self.get_statistics.call_count)
def test_http_error(self):
mock.patch(
'networking_odl.ceilometer.network.statistics.opendaylight_v2.'
'client.SwitchStatisticsAPIClient.get_statistics',
side_effect=Exception()).start()
sample_data = self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
{})
self.assertEqual(0, len(sample_data))
mock.patch(
'networking_odl.ceilometer.network.statistics.opendaylight_v2.'
'client.SwitchStatisticsAPIClient.get_statistics',
side_effect=[Exception(), self.switch_data]).start()
cache = {}
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
cache)
self.assertIn('network.statistics.opendaylight_v2', cache)
class TestOpenDayLightDriverSimple(_Base):
switch_data = {
"flow_capable_switches": [{
"packet_in_messages_received": "501",
"packet_out_messages_sent": "300",
"ports": "1",
"flow_datapath_id": "55120148545607",
"switch_port_counters": [{
"bytes_received": "0",
"bytes_sent": "0",
"duration": "600",
"packets_internal_received": "444",
"packets_internal_sent": "0",
"packets_received": "0",
"packets_received_drop": "0",
"packets_received_error": "0",
"packets_sent": "0",
"port_id": "4",
"tenant_id": PORT_1_TENANT_ID,
"uuid": PORT_1_ID
}],
"table_counters": [{
"flow_count": "90",
"table_id": "0"
}]
}]
}
def test_meter_switch(self):
expected_data = [
(1, "55120148545607",
{'controller': 'OpenDaylight_V2'},
ADMIN_ID),
]
self._test_for_meter('switch', expected_data)
def test_meter_switch_ports(self):
expected_data = [
(1, "55120148545607",
{'controller': 'OpenDaylight_V2'},
ADMIN_ID)
]
self._test_for_meter('switch.ports', expected_data)
def test_meter_switch_port(self):
expected_data = [
(1, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port', expected_data)
def test_meter_switch_port_uptime(self):
expected_data = [
(600, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.uptime', expected_data)
def test_meter_switch_port_receive_packets(self):
expected_data = [
(0, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.receive.packets', expected_data)
def test_meter_switch_port_transmit_packets(self):
expected_data = [
(0, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.transmit.packets', expected_data)
def test_meter_switch_port_receive_bytes(self):
expected_data = [
(0, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.receive.bytes', expected_data)
def test_meter_switch_port_transmit_bytes(self):
expected_data = [
(0, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.transmit.bytes', expected_data)
def test_meter_switch_port_receive_drops(self):
expected_data = [
(0, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.receive.drops', expected_data)
def test_meter_switch_port_receive_errors(self):
expected_data = [
(0, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.receive.errors', expected_data)
def test_meter_port(self):
expected_data = [
(1, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port', expected_data)
def test_meter_port_uptime(self):
expected_data = [
(600, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.uptime', expected_data)
def test_meter_port_receive_packets(self):
expected_data = [
(0, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.receive.packets', expected_data)
def test_meter_port_transmit_packets(self):
expected_data = [
(0, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.transmit.packets', expected_data)
def test_meter_port_receive_bytes(self):
expected_data = [
(0, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.receive.bytes', expected_data)
def test_meter_port_transmit_bytes(self):
expected_data = [
(0, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.transmit.bytes', expected_data)
def test_meter_port_receive_drops(self):
expected_data = [
(0, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.receive.drops', expected_data)
def test_meter_port_receive_errors(self):
expected_data = [
(0, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.receive.errors', expected_data)
def test_meter_switch_table_active_entries(self):
expected_data = [
(90, "55120148545607:table:0", {
'switch': '55120148545607',
'controller': 'OpenDaylight_V2'
}, ADMIN_ID),
]
self._test_for_meter('switch.table.active.entries', expected_data)
class TestOpenDayLightDriverComplex(_Base):
switch_data = {
"flow_capable_switches": [{
"packet_in_messages_received": "501",
"packet_out_messages_sent": "300",
"ports": "3",
"flow_datapath_id": "55120148545607",
"switch_port_counters": [{
"bytes_received": "0",
"bytes_sent": "512",
"duration": "200",
"packets_internal_received": "444",
"packets_internal_sent": "0",
"packets_received": "10",
"packets_received_drop": "0",
"packets_received_error": "0",
"packets_sent": "0",
"port_id": "3",
}, {
"bytes_received": "9800",
"bytes_sent": "6540",
"duration": "150",
"packets_internal_received": "0",
"packets_internal_sent": "7650",
"packets_received": "20",
"packets_received_drop": "0",
"packets_received_error": "0",
"packets_sent": "0",
"port_id": "2",
"tenant_id": PORT_2_TENANT_ID,
"uuid": PORT_2_ID
}, {
"bytes_received": "100",
"bytes_sent": "840",
"duration": "100",
"packets_internal_received": "984",
"packets_internal_sent": "7950",
"packets_received": "9900",
"packets_received_drop": "1500",
"packets_received_error": "1000",
"packets_sent": "7890",
"port_id": "1",
"tenant_id": PORT_1_TENANT_ID,
"uuid": PORT_1_ID
}],
"table_counters": [{
"flow_count": "90",
"table_id": "10"
}, {
"flow_count": "80",
"table_id": "20"
}],
}, {
"packet_in_messages_received": "0",
"packet_out_messages_sent": "0",
"ports": "0",
"flow_datapath_id": "55120148545555",
"table_counters": [{
"flow_count": "5",
"table_id": "10"
}, {
"flow_count": "3",
"table_id": "20"
}],
}]
}
def test_meter_switch(self):
expected_data = [
(1, "55120148545607", {
'controller': 'OpenDaylight_V2'
}, ADMIN_ID),
(1, "55120148545555", {
'controller': 'OpenDaylight_V2'
}, ADMIN_ID),
]
self._test_for_meter('switch', expected_data)
def test_meter_switch_ports(self):
expected_data = [
(3, "55120148545607", {
'controller': 'OpenDaylight_V2'
}, ADMIN_ID),
(0, "55120148545555", {
'controller': 'OpenDaylight_V2'
}, ADMIN_ID),
]
self._test_for_meter('switch.ports', expected_data)
def test_meter_switch_port(self):
expected_data = [
(1, "55120148545607:3", {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 3,
'switch': '55120148545607'
}, ADMIN_ID),
(1, '55120148545607:2', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 2,
'neutron_port_id': PORT_2_ID,
'switch': '55120148545607'
}, ADMIN_ID),
(1, '55120148545607:1', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 1,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port', expected_data)
def test_meter_switch_port_uptime(self):
expected_data = [
(200, "55120148545607:3", {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 3,
'switch': '55120148545607'
}, ADMIN_ID),
(150, '55120148545607:2', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 2,
'neutron_port_id': PORT_2_ID,
'switch': '55120148545607'
}, ADMIN_ID),
(100, '55120148545607:1', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 1,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.uptime', expected_data)
def test_meter_switch_port_receive_packets(self):
expected_data = [
(10, "55120148545607:3", {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 3,
'switch': '55120148545607'
}, ADMIN_ID),
(20, '55120148545607:2', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 2,
'neutron_port_id': PORT_2_ID,
'switch': '55120148545607'
}, ADMIN_ID),
(9900, '55120148545607:1', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 1,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.receive.packets', expected_data)
def test_meter_switch_port_transmit_packets(self):
expected_data = [
(0, "55120148545607:3", {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 3,
'switch': '55120148545607'
}, ADMIN_ID),
(0, '55120148545607:2', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 2,
'neutron_port_id': PORT_2_ID,
'switch': '55120148545607'
}, ADMIN_ID),
(7890, '55120148545607:1', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 1,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.transmit.packets', expected_data)
def test_meter_switch_port_receive_bytes(self):
expected_data = [
(0, "55120148545607:3", {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 3,
'switch': '55120148545607'
}, ADMIN_ID),
(9800, '55120148545607:2', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 2,
'neutron_port_id': PORT_2_ID,
'switch': '55120148545607'
}, ADMIN_ID),
(100, '55120148545607:1', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 1,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.receive.bytes', expected_data)
def test_meter_switch_port_transmit_bytes(self):
expected_data = [
(512, "55120148545607:3", {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 3,
'switch': '55120148545607'
}, ADMIN_ID),
(6540, '55120148545607:2', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 2,
'neutron_port_id': PORT_2_ID,
'switch': '55120148545607'
}, ADMIN_ID),
(840, '55120148545607:1', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 1,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.transmit.bytes', expected_data)
def test_meter_switch_port_receive_drops(self):
expected_data = [
(0, "55120148545607:3", {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 3,
'switch': '55120148545607'
}, ADMIN_ID),
(0, '55120148545607:2', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 2,
'neutron_port_id': PORT_2_ID,
'switch': '55120148545607'
}, ADMIN_ID),
(1500, '55120148545607:1', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 1,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.receive.drops', expected_data)
def test_meter_switch_port_receive_errors(self):
expected_data = [
(0, "55120148545607:3", {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 3,
'switch': '55120148545607'
}, ADMIN_ID),
(0, '55120148545607:2', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 2,
'neutron_port_id': PORT_2_ID,
'switch': '55120148545607'
}, ADMIN_ID),
(1000, '55120148545607:1', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 1,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.receive.errors', expected_data)
def test_meter_port(self):
expected_data = [
(1, PORT_2_ID,
{'controller': 'OpenDaylight_V2'},
PORT_2_TENANT_ID),
(1, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port', expected_data)
def test_meter_port_uptime(self):
expected_data = [
(150, PORT_2_ID,
{'controller': 'OpenDaylight_V2'},
PORT_2_TENANT_ID),
(100, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.uptime', expected_data)
def test_meter_port_receive_packets(self):
expected_data = [
(20, PORT_2_ID,
{'controller': 'OpenDaylight_V2'},
PORT_2_TENANT_ID),
(9900, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.receive.packets', expected_data)
def test_meter_port_transmit_packets(self):
expected_data = [
(0, PORT_2_ID,
{'controller': 'OpenDaylight_V2'},
PORT_2_TENANT_ID),
(7890, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.transmit.packets', expected_data)
def test_meter_port_receive_bytes(self):
expected_data = [
(9800, PORT_2_ID,
{'controller': 'OpenDaylight_V2'},
PORT_2_TENANT_ID),
(100, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.receive.bytes', expected_data)
def test_meter_port_transmit_bytes(self):
expected_data = [
(6540, PORT_2_ID,
{'controller': 'OpenDaylight_V2'},
PORT_2_TENANT_ID),
(840, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.transmit.bytes', expected_data)
def test_meter_port_receive_drops(self):
expected_data = [
(0, PORT_2_ID,
{'controller': 'OpenDaylight_V2'},
PORT_2_TENANT_ID),
(1500, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.receive.drops', expected_data)
def test_meter_port_receive_errors(self):
expected_data = [
(0, PORT_2_ID,
{'controller': 'OpenDaylight_V2'},
PORT_2_TENANT_ID),
(1000, PORT_1_ID,
{'controller': 'OpenDaylight_V2'},
PORT_1_TENANT_ID),
]
self._test_for_meter('port.receive.errors', expected_data)
def test_meter_switch_table_active_entries(self):
expected_data = [
(90, "55120148545607:table:10", {
'switch': '55120148545607',
'controller': 'OpenDaylight_V2'
}, ADMIN_ID),
(80, "55120148545607:table:20", {
'switch': '55120148545607',
'controller': 'OpenDaylight_V2'
}, ADMIN_ID),
(5, "55120148545555:table:10", {
'switch': '55120148545555',
'controller': 'OpenDaylight_V2'
}, ADMIN_ID),
(3, "55120148545555:table:20", {
'switch': '55120148545555',
'controller': 'OpenDaylight_V2'
}, ADMIN_ID),
]
self._test_for_meter('switch.table.active.entries', expected_data)
| openstack/networking-odl | networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_driver.py | Python | apache-2.0 | 25,998 | 0 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.dns.types import RecordType, ZoneDoesNotExistError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.drivers.route53 import Route53DNSDriver
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_ROUTE53
class Route53Tests(unittest.TestCase):
def setUp(self):
Route53DNSDriver.connectionCls.conn_class = Route53MockHttp
Route53MockHttp.type = None
self.driver = Route53DNSDriver(*DNS_PARAMS_ROUTE53)
def test_list_record_types(self):
record_types = self.driver.list_record_types()
self.assertEqual(len(record_types), 10)
self.assertTrue(RecordType.A in record_types)
def test_list_zones(self):
zones = self.driver.list_zones()
self.assertEqual(len(zones), 5)
zone = zones[0]
self.assertEqual(zone.id, '47234')
self.assertEqual(zone.type, 'master')
self.assertEqual(zone.domain, 't.com')
def test_list_records(self):
zone = self.driver.list_zones()[0]
records = self.driver.list_records(zone=zone)
self.assertEqual(len(records), 10)
record = records[1]
self.assertEqual(record.name, 'www')
self.assertEqual(record.id, 'A:www')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '208.111.35.173')
self.assertEqual(record.extra['ttl'], 86400)
record = records[3]
self.assertEqual(record.type, RecordType.MX)
self.assertEqual(record.data, 'ASPMX.L.GOOGLE.COM.')
self.assertEqual(record.extra['priority'], 1)
record = records[4]
self.assertEqual(record.type, RecordType.MX)
self.assertEqual(record.data, 'ALT1.ASPMX.L.GOOGLE.COM.')
self.assertEqual(record.extra['priority'], 5)
record = records[8]
self.assertEqual(record.type, RecordType.SRV)
self.assertEqual(record.data, 'xmpp-server.example.com.')
self.assertEqual(record.extra['priority'], 1)
self.assertEqual(record.extra['weight'], 10)
self.assertEqual(record.extra['port'], 5269)
def test_get_zone(self):
zone = self.driver.get_zone(zone_id='47234')
self.assertEqual(zone.id, '47234')
self.assertEqual(zone.type, 'master')
self.assertEqual(zone.domain, 't.com')
def test_get_record(self):
record = self.driver.get_record(zone_id='47234',
record_id='CNAME:wibble')
self.assertEqual(record.name, 'wibble')
self.assertEqual(record.type, RecordType.CNAME)
self.assertEqual(record.data, 't.com')
def test_list_records_zone_does_not_exist(self):
zone = self.driver.list_zones()[0]
Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.list_records(zone=zone)
except ZoneDoesNotExistError as e:
self.assertEqual(e.zone_id, zone.id)
else:
self.fail('Exception was not thrown')
def test_get_zone_does_not_exist(self):
Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.get_zone(zone_id='47234')
except ZoneDoesNotExistError as e:
self.assertEqual(e.zone_id, '47234')
else:
self.fail('Exception was not thrown')
def test_get_record_zone_does_not_exist(self):
Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.get_record(zone_id='4444', record_id='28536')
except ZoneDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_record_record_does_not_exist(self):
Route53MockHttp.type = 'RECORD_DOES_NOT_EXIST'
rid = 'CNAME:doesnotexist.t.com'
try:
self.driver.get_record(zone_id='47234',
record_id=rid)
except RecordDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_create_zone(self):
zone = self.driver.create_zone(domain='t.com', type='master',
ttl=None, extra=None)
self.assertEqual(zone.id, '47234')
self.assertEqual(zone.domain, 't.com')
def test_create_record(self):
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='www', zone=zone,
type=RecordType.A, data='127.0.0.1',
extra={'ttl': 0}
)
self.assertEqual(record.id, 'A:www')
self.assertEqual(record.name, 'www')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '127.0.0.1')
def test_create_record_zone_name(self):
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.A, data='127.0.0.1',
extra={'ttl': 0}
)
self.assertEqual(record.id, 'A:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '127.0.0.1')
def test_create_TXT_record(self):
"""
Check that TXT records are created in quotes
"""
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.TXT, data='test'
)
self.assertEqual(record.id, 'TXT:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.TXT)
self.assertEqual(record.data, '"test"')
def test_create_TXT_record_quoted(self):
"""
Check that TXT values already quoted are not changed
"""
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.TXT, data='"test"'
)
self.assertEqual(record.id, 'TXT:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.TXT)
self.assertEqual(record.data, '"test"')
def test_create_SPF_record(self):
"""
Check that SPF records are created in quotes
"""
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.SPF, data='test'
)
self.assertEqual(record.id, 'SPF:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.SPF)
self.assertEqual(record.data, '"test"')
def test_create_SPF_record_quoted(self):
"""
Check that SPF values already quoted are not changed
"""
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.SPF, data='"test"'
)
self.assertEqual(record.id, 'SPF:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.SPF)
self.assertEqual(record.data, '"test"')
def test_create_TXT_record_escaped(self):
"""
Check that TXT record with quotes inside are escaped correctly
"""
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.TXT, data='test "with"'
)
self.assertEqual(record.id, 'TXT:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.TXT)
self.assertEqual(record.data, '"test \"with\""')
def test_create_multi_value_record(self):
zone = self.driver.list_zones()[0]
records = self.driver.ex_create_multi_value_record(
name='balancer', zone=zone,
type=RecordType.A, data='127.0.0.1\n127.0.0.2',
extra={'ttl': 0}
)
self.assertEqual(len(records), 2)
self.assertEqual(records[0].id, 'A:balancer')
self.assertEqual(records[1].id, 'A:balancer')
self.assertEqual(records[0].name, 'balancer')
self.assertEqual(records[1].name, 'balancer')
self.assertEqual(records[0].zone, zone)
self.assertEqual(records[1].zone, zone)
self.assertEqual(records[0].type, RecordType.A)
self.assertEqual(records[1].type, RecordType.A)
self.assertEqual(records[0].data, '127.0.0.1')
self.assertEqual(records[1].data, '127.0.0.2')
def test_update_record(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[1]
params = {
'record': record,
'name': 'www',
'type': RecordType.A,
'data': '::1',
'extra': {'ttle': 0}}
updated_record = self.driver.update_record(**params)
self.assertEqual(record.data, '208.111.35.173')
self.assertEqual(updated_record.id, 'A:www')
self.assertEqual(updated_record.name, 'www')
self.assertEqual(updated_record.zone, record.zone)
self.assertEqual(updated_record.type, RecordType.A)
self.assertEqual(updated_record.data, '::1')
def test_delete_zone(self):
zone = self.driver.list_zones()[0]
status = self.driver.delete_zone(zone=zone)
self.assertTrue(status)
def test_delete_zone_does_not_exist(self):
zone = self.driver.list_zones()[0]
Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.delete_zone(zone=zone)
except ZoneDoesNotExistError as e:
self.assertEqual(e.zone_id, zone.id)
else:
self.fail('Exception was not thrown')
def test_delete_record(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
status = self.driver.delete_record(record=record)
self.assertTrue(status)
def test_delete_record_does_not_exist(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
Route53MockHttp.type = 'RECORD_DOES_NOT_EXIST'
try:
self.driver.delete_record(record=record)
except RecordDoesNotExistError as e:
self.assertEqual(e.record_id, record.id)
else:
self.fail('Exception was not thrown')
class Route53MockHttp(MockHttp):
fixtures = DNSFileFixtures('route53')
def _2012_02_29_hostedzone_47234(self, method, url, body, headers):
body = self.fixtures.load('get_zone.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _2012_02_29_hostedzone(self, method, url, body, headers):
# print method, url, body, headers
if method == "POST":
body = self.fixtures.load("create_zone.xml")
return (httplib.CREATED, body, {}, httplib.responses[httplib.OK])
body = self.fixtures.load('list_zones.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _2012_02_29_hostedzone_47234_rrset(self, method, url, body, headers):
body = self.fixtures.load('list_records.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _2012_02_29_hostedzone_47234_rrset_ZONE_DOES_NOT_EXIST(self, method,
url, body, headers):
body = self.fixtures.load('zone_does_not_exist.xml')
return (httplib.NOT_FOUND, body,
{}, httplib.responses[httplib.NOT_FOUND])
def _2012_02_29_hostedzone_4444_ZONE_DOES_NOT_EXIST(self, method,
url, body, headers):
body = self.fixtures.load('zone_does_not_exist.xml')
return (httplib.NOT_FOUND, body,
{}, httplib.responses[httplib.NOT_FOUND])
def _2012_02_29_hostedzone_47234_ZONE_DOES_NOT_EXIST(self, method,
url, body, headers):
body = self.fixtures.load('zone_does_not_exist.xml')
return (httplib.NOT_FOUND, body,
{}, httplib.responses[httplib.NOT_FOUND])
def _2012_02_29_hostedzone_47234_rrset_RECORD_DOES_NOT_EXIST(self, method,
url, body, headers):
if method == "POST":
body = self.fixtures.load('invalid_change_batch.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.BAD_REQUEST])
body = self.fixtures.load('record_does_not_exist.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _2012_02_29_hostedzone_47234_RECORD_DOES_NOT_EXIST(self, method,
url, body, headers):
body = self.fixtures.load('get_zone.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
| andrewsomething/libcloud | libcloud/test/dns/test_route53.py | Python | apache-2.0 | 14,213 | 0.000211 |
def sum_args(*args):
return sum(args)
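# Added illustrative check: the star-args are collected into a tuple, so any
# number of positional arguments (including none) can be summed.
if __name__ == '__main__':
    assert sum_args() == 0
    assert sum_args(1, 2, 3) == 6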
| the-zebulan/CodeWars | katas/kyu_7/sum_of_all_arguments.py | Python | mit | 42 | 0 |
# i18n markers
def N_(msg):
'''
Single translatable string marker.
Does nothing, just a marker for \\*.pot file compilers.
Usage::
n = N_('translate me')
translated = env.gettext(n)
'''
return msg
class M_(object):
'''
Marker for translatable string with plural form.
    Does not perform a translation itself, it just encapsulates data about
    the translatable string.
:param single: a single form
:param plural: a plural form. Count can be included in %\-format syntax
    :param count_field: a key in the format arguments that holds the count
Usage::
message = M_(u'max length is %(max)d symbol',
u'max length is %(max)d symbols',
count_field="max")
m = message % {'max': 10}
trans = env.ngettext(m.single,
m.plural,
m.count
) % m.format_args
'''
def __init__(self, single, plural, count_field='count', format_args=None):
self.single = single
self.plural = plural
self.count_field = count_field
self.format_args = format_args
def __mod__(self, format_args):
'''
Returns a copy of the object with bound formatting args (as dict).
A key equal to `count_field` must be in `format_args`.
'''
return self.__class__(self.single, self.plural, count_field=self.count_field,
format_args=format_args)
@property
def count(self):
'''
A count based on `count_field` and `format_args`.
'''
args = self.format_args
if args is None or \
(isinstance(args, dict) and self.count_field not in args):
raise TypeError("count is required")
return args[self.count_field] if isinstance(args, dict) else args
def __unicode__(self):
args = self.format_args
if self.count == 1:
return self.single % args
return self.plural % args
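# Added illustrative sketch (not part of the original module): shows how M_
# binds formatting arguments and exposes the count used for plural selection,
# without involving any gettext machinery.
def _m_marker_example():  # documentation sketch only
    message = M_(u'%(count)d item', u'%(count)d items')
    bound = message % {'count': 3}
    # __unicode__ is called directly since this module targets Python 2's
    # unicode() protocol.
    return bound.count, bound.__unicode__()  # (3, u'3 items')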
| boltnev/iktomi | iktomi/utils/i18n.py | Python | mit | 2,033 | 0.001476 |
"""This module provides the components needed to build your own __import__
function. Undocumented functions are obsolete.
In most cases it is preferred you consider using the importlib module's
functionality over this module.
"""
# (Probably) need to stay in _imp
from _imp import (lock_held, acquire_lock, release_lock,
get_frozen_object, is_frozen_package,
init_frozen, is_builtin, is_frozen,
_fix_co_filename)
try:
from _imp import create_dynamic
except ImportError:
# Platform doesn't support dynamic loading.
create_dynamic = None
from importlib._bootstrap import _ERR_MSG, _exec, _load, _builtin_from_name
from importlib._bootstrap_external import SourcelessFileLoader
from importlib import machinery
from importlib import util
import importlib
import os
import sys
import tokenize
import types
import warnings
warnings.warn("the imp module is deprecated in favour of importlib; "
"see the module's documentation for alternative uses",
PendingDeprecationWarning, stacklevel=2)
# DEPRECATED
SEARCH_ERROR = 0
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
PY_RESOURCE = 4
PKG_DIRECTORY = 5
C_BUILTIN = 6
PY_FROZEN = 7
PY_CODERESOURCE = 8
IMP_HOOK = 9
def new_module(name):
"""**DEPRECATED**
Create a new module.
The module is not entered into sys.modules.
"""
return types.ModuleType(name)
def get_magic():
"""**DEPRECATED**
Return the magic number for .pyc files.
"""
return util.MAGIC_NUMBER
def get_tag():
"""Return the magic tag for .pyc files."""
return sys.implementation.cache_tag
def cache_from_source(path, debug_override=None):
"""**DEPRECATED**
Given the path to a .py file, return the path to its .pyc file.
The .py file does not need to exist; this simply returns the path to the
.pyc file calculated as if the .py file were imported.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return util.cache_from_source(path, debug_override)
def source_from_cache(path):
"""**DEPRECATED**
Given the path to a .pyc. file, return the path to its .py file.
The .pyc file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc file. If path does
not conform to PEP 3147 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
return util.source_from_cache(path)
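# Added illustrative sketch: cache_from_source()/source_from_cache() map
# between a source path and its PEP 3147 cache path; the exact file name
# depends on sys.implementation.cache_tag (e.g. 'cpython-35').
def _example_cache_paths():  # documentation sketch only
    cached = cache_from_source('spam/eggs.py')
    # e.g. 'spam/__pycache__/eggs.cpython-35.pyc' on CPython 3.5
    return source_from_cache(cached)  # 'spam/eggs.py'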
def get_suffixes():
"""**DEPRECATED**"""
extensions = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES]
source = [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES]
bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES]
return extensions + source + bytecode
class NullImporter:
"""**DEPRECATED**
Null import object.
"""
def __init__(self, path):
if path == '':
raise ImportError('empty pathname', path='')
elif os.path.isdir(path):
raise ImportError('existing directory', path=path)
def find_module(self, fullname):
"""Always returns None."""
return None
class _HackedGetData:
"""Compatibility support for 'file' arguments of various load_*()
functions."""
def __init__(self, fullname, path, file=None):
super().__init__(fullname, path)
self.file = file
def get_data(self, path):
"""Gross hack to contort loader to deal w/ load_*()'s bad API."""
if self.file and path == self.path:
if not self.file.closed:
file = self.file
else:
self.file = file = open(self.path, 'r')
with file:
# Technically should be returning bytes, but
# SourceLoader.get_code() just passed what is returned to
# compile() which can handle str. And converting to bytes would
# require figuring out the encoding to decode to and
# tokenize.detect_encoding() only accepts bytes.
return file.read()
else:
return super().get_data(path)
class _LoadSourceCompatibility(_HackedGetData, machinery.SourceFileLoader):
"""Compatibility support for implementing load_source()."""
def load_source(name, pathname, file=None):
loader = _LoadSourceCompatibility(name, pathname, file)
spec = util.spec_from_file_location(name, pathname, loader=loader)
if name in sys.modules:
module = _exec(spec, sys.modules[name])
else:
module = _load(spec)
# To allow reloading to potentially work, use a non-hacked loader which
# won't rely on a now-closed file object.
module.__loader__ = machinery.SourceFileLoader(name, pathname)
module.__spec__.loader = module.__loader__
return module
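# Added illustrative sketch (not part of the stdlib module): the classic
# pattern this shim preserves - loading a module object straight from a file
# path. importlib.util is the recommended modern replacement.
def _example_load_source():  # documentation sketch only
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
        f.write('ANSWER = 42\n')
    module = load_source('example_mod', f.name)
    return module.ANSWER  # 42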
class _LoadCompiledCompatibility(_HackedGetData, SourcelessFileLoader):
"""Compatibility support for implementing load_compiled()."""
def load_compiled(name, pathname, file=None):
"""**DEPRECATED**"""
loader = _LoadCompiledCompatibility(name, pathname, file)
spec = util.spec_from_file_location(name, pathname, loader=loader)
if name in sys.modules:
module = _exec(spec, sys.modules[name])
else:
module = _load(spec)
# To allow reloading to potentially work, use a non-hacked loader which
# won't rely on a now-closed file object.
module.__loader__ = SourcelessFileLoader(name, pathname)
module.__spec__.loader = module.__loader__
return module
def load_package(name, path):
"""**DEPRECATED**"""
if os.path.isdir(path):
extensions = (machinery.SOURCE_SUFFIXES[:] +
machinery.BYTECODE_SUFFIXES[:])
for extension in extensions:
path = os.path.join(path, '__init__'+extension)
if os.path.exists(path):
break
else:
raise ValueError('{!r} is not a package'.format(path))
spec = util.spec_from_file_location(name, path,
submodule_search_locations=[])
if name in sys.modules:
return _exec(spec, sys.modules[name])
else:
return _load(spec)
def load_module(name, file, filename, details):
"""**DEPRECATED**
Load a module, given information returned by find_module().
The module name must include the full package name, if any.
"""
suffix, mode, type_ = details
if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
raise ValueError('invalid file open mode {!r}'.format(mode))
elif file is None and type_ in {PY_SOURCE, PY_COMPILED}:
msg = 'file object required for import (type code {})'.format(type_)
raise ValueError(msg)
elif type_ == PY_SOURCE:
return load_source(name, filename, file)
elif type_ == PY_COMPILED:
return load_compiled(name, filename, file)
elif type_ == C_EXTENSION and load_dynamic is not None:
if file is None:
with open(filename, 'rb') as opened_file:
return load_dynamic(name, filename, opened_file)
else:
return load_dynamic(name, filename, file)
elif type_ == PKG_DIRECTORY:
return load_package(name, filename)
elif type_ == C_BUILTIN:
return init_builtin(name)
elif type_ == PY_FROZEN:
return init_frozen(name)
else:
msg = "Don't know how to import {} (type code {})".format(name, type_)
raise ImportError(msg, name=name)
def find_module(name, path=None):
"""**DEPRECATED**
Search for a module.
If path is omitted or None, search for a built-in, frozen or special
module and continue search in sys.path. The module name cannot
contain '.'; to search for a submodule of a package, pass the
submodule name and the package's __path__.
"""
if not isinstance(name, str):
raise TypeError("'name' must be a str, not {}".format(type(name)))
elif not isinstance(path, (type(None), list)):
# Backwards-compatibility
raise RuntimeError("'list' must be None or a list, "
"not {}".format(type(name)))
if path is None:
if is_builtin(name):
return None, None, ('', '', C_BUILTIN)
elif is_frozen(name):
return None, None, ('', '', PY_FROZEN)
else:
path = sys.path
for entry in path:
package_directory = os.path.join(entry, name)
for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
package_file_name = '__init__' + suffix
file_path = os.path.join(package_directory, package_file_name)
if os.path.isfile(file_path):
return None, package_directory, ('', '', PKG_DIRECTORY)
for suffix, mode, type_ in get_suffixes():
file_name = name + suffix
file_path = os.path.join(entry, file_name)
if os.path.isfile(file_path):
break
else:
continue
break # Break out of outer loop when breaking out of inner loop.
else:
raise ImportError(_ERR_MSG.format(name), name=name)
encoding = None
if 'b' not in mode:
with open(file_path, 'rb') as file:
encoding = tokenize.detect_encoding(file.readline)[0]
file = open(file_path, mode, encoding=encoding)
return file, file_path, (suffix, mode, type_)
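# Added illustrative sketch: the historical find_module()/load_module()
# pairing that this module emulates on top of importlib. For a package such
# as 'json' the returned file handle is None.
def _example_find_and_load():  # documentation sketch only
    file, pathname, description = find_module('json')
    try:
        return load_module('json', file, pathname, description)
    finally:
        if file is not None:
            file.close()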
def reload(module):
"""**DEPRECATED**
Reload the module and return it.
The module must have been successfully imported before.
"""
return importlib.reload(module)
def init_builtin(name):
"""**DEPRECATED**
    Load and return a built-in module by name, or None if such module doesn't
    exist.
"""
try:
return _builtin_from_name(name)
except ImportError:
return None
if create_dynamic:
def load_dynamic(name, path, file=None):
"""**DEPRECATED**
Load an extension module.
"""
import importlib.machinery
loader = importlib.machinery.ExtensionFileLoader(name, path)
# Issue #24748: Skip the sys.modules check in _load_module_shim;
# always load new extension
spec = importlib.machinery.ModuleSpec(
name=name, loader=loader, origin=path)
return _load(spec)
else:
load_dynamic = None
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/imp.py | Python | gpl-3.0 | 10,631 | 0.000094 |