text | meta
---|---
"""WMI RDF values."""
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import sysinfo_pb2
class WMIActiveScriptEventConsumer(rdf_structs.RDFProtoStruct):
protobuf = sysinfo_pb2.WMIActiveScriptEventConsumer
class WMICommandLineEventConsumer(rdf_structs.RDFProtoStruct):
protobuf = sysinfo_pb2.WMICommandLineEventConsumer
| {
"content_hash": "fe41ce79b1f69c34854e58bb6dfb5c90",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 63,
"avg_line_length": 29,
"alnum_prop": 0.8275862068965517,
"repo_name": "destijl/grr",
"id": "2a0d6574315c09bdfb9a7063d9e6c7b1774b13c2",
"size": "370",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grr/lib/rdfvalues/wmi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3409"
},
{
"name": "C",
"bytes": "10658"
},
{
"name": "C++",
"bytes": "304794"
},
{
"name": "CMake",
"bytes": "3228"
},
{
"name": "CSS",
"bytes": "26524"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "173692"
},
{
"name": "JavaScript",
"bytes": "63181"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Protocol Buffer",
"bytes": "307091"
},
{
"name": "Python",
"bytes": "6407750"
},
{
"name": "Ruby",
"bytes": "5604"
},
{
"name": "Shell",
"bytes": "40334"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
} |
"""This is "__init__.py" file of maildaemon module."""
from ._logging import configure_logging
# from .config import load_config
from .connection import Connection
# from .connection_group import ConnectionGroup
from .imap_connection import IMAPConnection
# from .smtp_connection import SMTPConnection
# from .pop_connection import POPConnection
from .message import Message
# from .message_filter import MessageFilter
from .folder import Folder
# from .daemon import Daemon
# from .daemon_group import DaemonGroup
# from .imap_daemon import IMAPDaemon
# from .smtp_daemon import SMTPDaemon
# from .pop_daemon import POPDaemon
# from .gmail_imap_daemon import GmailIMAPDaemon
__all__ = ['Connection', 'IMAPConnection', 'Message', 'Folder']
configure_logging()
| {
"content_hash": "e06a1b165db35b5b70f63993ac014cf2",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 63,
"avg_line_length": 27.464285714285715,
"alnum_prop": 0.7737321196358907,
"repo_name": "mbdevpl/maildaemon",
"id": "dd9fc4c69e883db53144ed7a10b03bead70f11bd",
"size": "769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maildaemon/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "10708"
},
{
"name": "Python",
"bytes": "135915"
},
{
"name": "Shell",
"bytes": "749"
}
],
"symlink_target": ""
} |
"""Whimpy test script for the cl module
Roger E. Masse
"""
import cl
from test_support import verbose
clattrs = ['ADDED_ALGORITHM_ERROR', 'ALAW', 'ALGORITHM_ID',
'ALGORITHM_VERSION', 'AUDIO', 'AWARE_ERROR', 'AWARE_MPEG_AUDIO',
'AWARE_MULTIRATE', 'AWCMP_CONST_QUAL', 'AWCMP_FIXED_RATE',
'AWCMP_INDEPENDENT', 'AWCMP_JOINT_STEREO', 'AWCMP_LOSSLESS',
'AWCMP_MPEG_LAYER_I', 'AWCMP_MPEG_LAYER_II', 'AWCMP_STEREO',
'Algorithm', 'AlgorithmNumber', 'AlgorithmType', 'AudioFormatName',
'BAD_ALGORITHM_NAME', 'BAD_ALGORITHM_TYPE', 'BAD_BLOCK_SIZE',
'BAD_BOARD', 'BAD_BUFFERING', 'BAD_BUFFERLENGTH_NEG',
'BAD_BUFFERLENGTH_ODD', 'BAD_BUFFER_EXISTS', 'BAD_BUFFER_HANDLE',
'BAD_BUFFER_POINTER', 'BAD_BUFFER_QUERY_SIZE', 'BAD_BUFFER_SIZE',
'BAD_BUFFER_SIZE_POINTER', 'BAD_BUFFER_TYPE',
'BAD_COMPRESSION_SCHEME', 'BAD_COMPRESSOR_HANDLE',
'BAD_COMPRESSOR_HANDLE_POINTER', 'BAD_FRAME_SIZE',
'BAD_FUNCTIONALITY', 'BAD_FUNCTION_POINTER', 'BAD_HEADER_SIZE',
'BAD_INITIAL_VALUE', 'BAD_INTERNAL_FORMAT', 'BAD_LICENSE',
'BAD_MIN_GT_MAX', 'BAD_NO_BUFFERSPACE', 'BAD_NUMBER_OF_BLOCKS',
'BAD_PARAM', 'BAD_PARAM_ID_POINTER', 'BAD_PARAM_TYPE', 'BAD_POINTER',
'BAD_PVBUFFER', 'BAD_SCHEME_POINTER', 'BAD_STREAM_HEADER',
'BAD_STRING_POINTER', 'BAD_TEXT_STRING_PTR', 'BEST_FIT',
'BIDIRECTIONAL', 'BITRATE_POLICY', 'BITRATE_TARGET',
'BITS_PER_COMPONENT', 'BLENDING', 'BLOCK_SIZE', 'BOTTOM_UP',
'BUFFER_NOT_CREATED', 'BUF_DATA', 'BUF_FRAME', 'BytesPerPixel',
'BytesPerSample', 'CHANNEL_POLICY', 'CHROMA_THRESHOLD', 'CODEC',
'COMPONENTS', 'COMPRESSED_BUFFER_SIZE', 'COMPRESSION_RATIO',
'COMPRESSOR', 'CONTINUOUS_BLOCK', 'CONTINUOUS_NONBLOCK',
'CompressImage', 'DATA', 'DECOMPRESSOR', 'DecompressImage',
'EDGE_THRESHOLD', 'ENABLE_IMAGEINFO', 'END_OF_SEQUENCE', 'ENUM_VALUE',
'EXACT_COMPRESSION_RATIO', 'EXTERNAL_DEVICE', 'FLOATING_ENUM_VALUE',
'FLOATING_RANGE_VALUE', 'FRAME', 'FRAME_BUFFER_SIZE',
'FRAME_BUFFER_SIZE_ZERO', 'FRAME_RATE', 'FRAME_TYPE', 'G711_ALAW',
'G711_ULAW', 'GRAYSCALE', 'GetAlgorithmName', 'HDCC',
'HDCC_SAMPLES_PER_TILE', 'HDCC_TILE_THRESHOLD', 'HEADER_START_CODE',
'IMAGE_HEIGHT', 'IMAGE_WIDTH', 'INTERNAL_FORMAT',
'INTERNAL_IMAGE_HEIGHT', 'INTERNAL_IMAGE_WIDTH', 'INTRA', 'JPEG',
'JPEG_ERROR', 'JPEG_NUM_PARAMS', 'JPEG_QUALITY_FACTOR',
'JPEG_QUANTIZATION_TABLES', 'JPEG_SOFTWARE', 'JPEG_STREAM_HEADERS',
'KEYFRAME', 'LAST_FRAME_INDEX', 'LAYER', 'LUMA_THRESHOLD',
'MAX_NUMBER_OF_AUDIO_ALGORITHMS', 'MAX_NUMBER_OF_ORIGINAL_FORMATS',
'MAX_NUMBER_OF_PARAMS', 'MAX_NUMBER_OF_VIDEO_ALGORITHMS', 'MONO',
'MPEG_VIDEO', 'MVC1', 'MVC2', 'MVC2_BLENDING', 'MVC2_BLENDING_OFF',
'MVC2_BLENDING_ON', 'MVC2_CHROMA_THRESHOLD', 'MVC2_EDGE_THRESHOLD',
'MVC2_ERROR', 'MVC2_LUMA_THRESHOLD', 'NEXT_NOT_AVAILABLE',
'NOISE_MARGIN', 'NONE', 'NUMBER_OF_FRAMES', 'NUMBER_OF_PARAMS',
'ORIENTATION', 'ORIGINAL_FORMAT', 'OpenCompressor',
'OpenDecompressor', 'PARAM_OUT_OF_RANGE', 'PREDICTED', 'PREROLL',
'ParamID', 'ParamNumber', 'ParamType', 'QUALITY_FACTOR',
'QUALITY_LEVEL', 'QueryAlgorithms', 'QueryMaxHeaderSize',
'QueryScheme', 'QuerySchemeFromName', 'RANGE_VALUE', 'RGB', 'RGB332',
'RGB8', 'RGBA', 'RGBX', 'RLE', 'RLE24', 'RTR', 'RTR1',
'RTR_QUALITY_LEVEL', 'SAMPLES_PER_TILE', 'SCHEME_BUSY',
'SCHEME_NOT_AVAILABLE', 'SPEED', 'STEREO_INTERLEAVED',
'STREAM_HEADERS', 'SetDefault', 'SetMax', 'SetMin', 'TILE_THRESHOLD',
'TOP_DOWN', 'ULAW', 'UNCOMPRESSED', 'UNCOMPRESSED_AUDIO',
'UNCOMPRESSED_VIDEO', 'UNKNOWN_SCHEME', 'VIDEO', 'VideoFormatName',
'Y', 'YCbCr', 'YCbCr422', 'YCbCr422DC', 'YCbCr422HC', 'YUV', 'YUV422',
'YUV422DC', 'YUV422HC', '__doc__', '__name__', 'cvt_type', 'error']
# This is a very unobtrusive test for the existence of the cl
# module and all its attributes.
def main():
# touch all the attributes of cl without doing anything
if verbose:
print 'Touching cl module attributes...'
for attr in clattrs:
if verbose:
print 'touching: ', attr
getattr(cl, attr)
main()
| {
"content_hash": "e26663c6448818108a888a600b7f7f9d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 70,
"avg_line_length": 50.675324675324674,
"alnum_prop": 0.6978472578165044,
"repo_name": "mariaantoanelam/Licenta",
"id": "26c5146863f1b39b053f1cd3f45df87401a3078d",
"size": "3925",
"binary": false,
"copies": "21",
"ref": "refs/heads/master",
"path": "Lib/test/test_cl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31035"
},
{
"name": "HTML",
"bytes": "134311"
},
{
"name": "Java",
"bytes": "161404"
},
{
"name": "JavaScript",
"bytes": "11470"
},
{
"name": "Python",
"bytes": "4053763"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import numbers
from typing import TYPE_CHECKING
import warnings
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
from pandas._typing import (
ArrayLike,
Dtype,
type_t,
)
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_bool_dtype,
is_float,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
ExtensionDtype,
register_extension_dtype,
)
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from pandas.core.arrays.masked import (
BaseMaskedArray,
BaseMaskedDtype,
)
if TYPE_CHECKING:
import pyarrow
@register_extension_dtype
class BooleanDtype(BaseMaskedDtype):
"""
Extension dtype for boolean data.
.. versionadded:: 1.0.0
.. warning::
BooleanDtype is considered experimental. The implementation and
parts of the API may change without warning.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> pd.BooleanDtype()
BooleanDtype
"""
name = "boolean"
# https://github.com/python/mypy/issues/4125
# error: Signature of "type" incompatible with supertype "BaseMaskedDtype"
@property
def type(self) -> type: # type: ignore[override]
return np.bool_
@property
def kind(self) -> str:
return "b"
@property
def numpy_dtype(self) -> np.dtype:
return np.dtype("bool")
@classmethod
def construct_array_type(cls) -> type_t[BooleanArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return BooleanArray
def __repr__(self) -> str:
return "BooleanDtype"
@property
def _is_boolean(self) -> bool:
return True
@property
def _is_numeric(self) -> bool:
return True
def __from_arrow__(
self, array: pyarrow.Array | pyarrow.ChunkedArray
) -> BooleanArray:
"""
Construct BooleanArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
if array.type != pyarrow.bool_():
raise TypeError(f"Expected array of boolean type, got {array.type} instead")
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
buflist = arr.buffers()
data = pyarrow.BooleanArray.from_buffers(
arr.type, len(arr), [None, buflist[1]], offset=arr.offset
).to_numpy(zero_copy_only=False)
if arr.null_count != 0:
mask = pyarrow.BooleanArray.from_buffers(
arr.type, len(arr), [None, buflist[0]], offset=arr.offset
).to_numpy(zero_copy_only=False)
mask = ~mask
else:
mask = np.zeros(len(arr), dtype=bool)
bool_arr = BooleanArray(data, mask)
results.append(bool_arr)
if not results:
return BooleanArray(
np.array([], dtype=np.bool_), np.array([], dtype=np.bool_)
)
else:
return BooleanArray._concat_same_type(results)
def coerce_to_array(
values, mask=None, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask.
Parameters
----------
values : 1D list-like
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
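Examples
--------
A minimal illustrative sketch (exact numpy reprs may differ slightly):
>>> coerce_to_array([True, False, None])
(array([ True, False, False]), array([False, False,  True]))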
"""
if isinstance(values, BooleanArray):
if mask is not None:
raise ValueError("cannot pass mask for BooleanArray input")
values, mask = values._data, values._mask
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
mask_values = None
if isinstance(values, np.ndarray) and values.dtype == np.bool_:
if copy:
values = values.copy()
elif isinstance(values, np.ndarray) and is_numeric_dtype(values.dtype):
mask_values = isna(values)
values_bool = np.zeros(len(values), dtype=bool)
values_bool[~mask_values] = values[~mask_values].astype(bool)
if not np.all(
values_bool[~mask_values].astype(values.dtype) == values[~mask_values]
):
raise TypeError("Need to pass bool-like values")
values = values_bool
else:
values_object = np.asarray(values, dtype=object)
inferred_dtype = lib.infer_dtype(values_object, skipna=True)
integer_like = ("floating", "integer", "mixed-integer-float")
if inferred_dtype not in ("boolean", "empty") + integer_like:
raise TypeError("Need to pass bool-like values")
mask_values = isna(values_object)
values = np.zeros(len(values), dtype=bool)
values[~mask_values] = values_object[~mask_values].astype(bool)
# if the values were integer-like, validate it were actually 0/1's
if (inferred_dtype in integer_like) and not (
np.all(
values[~mask_values].astype(float)
== values_object[~mask_values].astype(float)
)
):
raise TypeError("Need to pass bool-like values")
if mask is None and mask_values is None:
mask = np.zeros(len(values), dtype=bool)
elif mask is None:
mask = mask_values
else:
if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:
if mask_values is not None:
mask = mask | mask_values
else:
if copy:
mask = mask.copy()
else:
mask = np.array(mask, dtype=bool)
if mask_values is not None:
mask = mask | mask_values
if values.ndim != 1:
raise ValueError("values must be a 1D list-like")
if mask.ndim != 1:
raise ValueError("mask must be a 1D list-like")
return values, mask
class BooleanArray(BaseMaskedArray):
"""
Array of boolean (True/False) data with missing values.
This is a pandas Extension array for boolean data, under the hood
represented by 2 numpy arrays: a boolean array with the data and
a boolean array with the mask (True indicating missing).
BooleanArray implements Kleene logic (sometimes called three-value
logic) for logical operations. See :ref:`boolean.kleene` for more.
To construct a BooleanArray from generic array-like input, use
:func:`pandas.array` specifying ``dtype="boolean"`` (see examples
below).
.. versionadded:: 1.0.0
.. warning::
BooleanArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : numpy.ndarray
A 1-d boolean-dtype array with the data.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values (True
indicates missing).
copy : bool, default False
Whether to copy the `values` and `mask` arrays.
Attributes
----------
None
Methods
-------
None
Returns
-------
BooleanArray
Examples
--------
Create a BooleanArray with :func:`pandas.array`:
>>> pd.array([True, False, None], dtype="boolean")
<BooleanArray>
[True, False, <NA>]
Length: 3, dtype: boolean
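Because BooleanArray uses Kleene logic, NA can be absorbed by a known
operand in logical operations:
>>> pd.array([True, False, None], dtype="boolean") | True
<BooleanArray>
[True, True, True]
Length: 3, dtype: boolean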
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = False
_TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
_FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
raise TypeError(
"values should be boolean numpy array. Use "
"the 'pd.array' function instead"
)
self._dtype = BooleanDtype()
super().__init__(values, mask, copy=copy)
@property
def dtype(self) -> BooleanDtype:
return self._dtype
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
) -> BooleanArray:
if dtype:
assert dtype == "boolean"
values, mask = coerce_to_array(scalars, copy=copy)
return BooleanArray(values, mask)
@classmethod
def _from_sequence_of_strings(
cls,
strings: list[str],
*,
dtype: Dtype | None = None,
copy: bool = False,
true_values: list[str] | None = None,
false_values: list[str] | None = None,
) -> BooleanArray:
true_values_union = cls._TRUE_VALUES.union(true_values or [])
false_values_union = cls._FALSE_VALUES.union(false_values or [])
def map_string(s):
if isna(s):
return s
elif s in true_values_union:
return True
elif s in false_values_union:
return False
else:
raise ValueError(f"{s} cannot be cast to bool")
scalars = [map_string(x) for x in strings]
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
# For BooleanArray inputs, we apply the ufunc to ._data
# and mask the result.
if method == "reduce":
# Not clear how to handle missing values in reductions. Raise.
raise NotImplementedError("The 'reduce' method is not supported.")
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (BooleanArray,)):
return NotImplemented
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
mask = np.zeros(len(self), dtype=bool)
inputs2 = []
for x in inputs:
if isinstance(x, BooleanArray):
mask |= x._mask
inputs2.append(x._data)
else:
inputs2.append(x)
def reconstruct(x):
# we don't worry about scalar `x` here, since we
# raise for reduce up above.
if is_bool_dtype(x.dtype):
m = mask.copy()
return BooleanArray(x, m)
else:
x[mask] = np.nan
return x
result = getattr(ufunc, method)(*inputs2, **kwargs)
if isinstance(result, tuple):
return tuple(reconstruct(x) for x in result)
else:
return reconstruct(result)
def _coerce_to_array(self, value) -> tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value)
def astype(self, dtype, copy: bool = True) -> ArrayLike:
"""
Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
ndarray or ExtensionArray
NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
if incompatible type with a BooleanDtype, equivalent of same_kind
casting
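Examples
--------
A minimal illustrative sketch:
>>> arr = pd.array([True, False, pd.NA], dtype="boolean")
>>> arr.astype("float64")
array([ 1.,  0., nan])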
"""
dtype = pandas_dtype(dtype)
if isinstance(dtype, ExtensionDtype):
return super().astype(dtype, copy)
if is_bool_dtype(dtype):
# astype_nansafe converts np.nan to True
if self._hasna:
raise ValueError("cannot convert float NaN to bool")
else:
return self._data.astype(dtype, copy=copy)
# for integer, error if there are missing values
if is_integer_dtype(dtype) and self._hasna:
raise ValueError("cannot convert NA to integer")
# for float dtype, ensure we use np.nan before casting (numpy cannot
# deal with pd.NA)
na_value = self._na_value
if is_float_dtype(dtype):
na_value = np.nan
# coerce
return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)
def _values_for_argsort(self) -> np.ndarray:
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort : Return the indices that would sort this array.
"""
data = self._data.copy()
data[self._mask] = -1
return data
def any(self, *, skipna: bool = True, **kwargs):
"""
Return whether any element is True.
Returns False unless there is at least one element that is True.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be False, as for an empty array.
If `skipna` is False, the result will still be True if there is
at least one element that is True, otherwise NA will be returned
if there are NA's present.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.any : Numpy version of this method.
BooleanArray.all : Return whether all elements are True.
Examples
--------
The result indicates whether any element is True (and by default
skips NAs):
>>> pd.array([True, False, True]).any()
True
>>> pd.array([True, False, pd.NA]).any()
True
>>> pd.array([False, False, pd.NA]).any()
False
>>> pd.array([], dtype="boolean").any()
False
>>> pd.array([pd.NA], dtype="boolean").any()
False
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, False, pd.NA]).any(skipna=False)
True
>>> pd.array([False, False, pd.NA]).any(skipna=False)
<NA>
"""
kwargs.pop("axis", None)
nv.validate_any((), kwargs)
values = self._data.copy()
np.putmask(values, self._mask, False)
result = values.any()
if skipna:
return result
else:
if result or len(self) == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
def all(self, *, skipna: bool = True, **kwargs):
"""
Return whether all elements are True.
Returns True unless there is at least one element that is False.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be True, as for an empty array.
If `skipna` is False, the result will still be False if there is
at least one element that is False, otherwise NA will be returned
if there are NA's present.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.all : Numpy version of this method.
BooleanArray.any : Return whether any element is True.
Examples
--------
The result indicates whether all elements are True (and by default
skips NAs):
>>> pd.array([True, True, pd.NA]).all()
True
>>> pd.array([True, False, pd.NA]).all()
False
>>> pd.array([], dtype="boolean").all()
True
>>> pd.array([pd.NA], dtype="boolean").all()
True
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, True, pd.NA]).all(skipna=False)
<NA>
>>> pd.array([True, False, pd.NA]).all(skipna=False)
False
"""
kwargs.pop("axis", None)
nv.validate_all((), kwargs)
values = self._data.copy()
np.putmask(values, self._mask, True)
result = values.all()
if skipna:
return result
else:
if not result or len(self) == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
def _logical_method(self, other, op):
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
other_is_booleanarray = isinstance(other, BooleanArray)
other_is_scalar = lib.is_scalar(other)
mask = None
if other_is_booleanarray:
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other, dtype="bool")
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
other, mask = coerce_to_array(other, copy=False)
elif isinstance(other, np.bool_):
other = other.item()
if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):
raise TypeError(
"'other' should be pandas.NA or a bool. "
f"Got {type(other).__name__} instead."
)
if not other_is_scalar and len(self) != len(other):
raise ValueError("Lengths must match to compare")
if op.__name__ in {"or_", "ror_"}:
result, mask = ops.kleene_or(self._data, other, self._mask, mask)
elif op.__name__ in {"and_", "rand_"}:
result, mask = ops.kleene_and(self._data, other, self._mask, mask)
elif op.__name__ in {"xor", "rxor"}:
result, mask = ops.kleene_xor(self._data, other, self._mask, mask)
# error: Argument 2 to "BooleanArray" has incompatible type "Optional[Any]";
# expected "ndarray"
return BooleanArray(result, mask) # type: ignore[arg-type]
def _cmp_method(self, other, op):
from pandas.arrays import (
FloatingArray,
IntegerArray,
)
if isinstance(other, (IntegerArray, FloatingArray)):
return NotImplemented
mask = None
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
if other is libmissing.NA:
# numpy does not handle pd.NA well as "other" scalar (it returns
# a scalar False instead of an array)
result = np.zeros_like(self._data)
mask = np.ones_like(self._data)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all="ignore"):
result = op(self._data, other)
# nans propagate
if mask is None:
mask = self._mask.copy()
else:
mask = self._mask | mask
return BooleanArray(result, mask, copy=False)
def _arith_method(self, other, op):
mask = None
op_name = op.__name__
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if len(self) != len(other):
raise ValueError("Lengths must match")
# nans propagate
if mask is None:
mask = self._mask
if other is libmissing.NA:
mask |= True
else:
mask = self._mask | mask
if other is libmissing.NA:
# if other is NA, the result will be all NA and we can't run the
# actual op, so we need to choose the resulting dtype manually
if op_name in {"floordiv", "rfloordiv", "mod", "rmod", "pow", "rpow"}:
dtype = "int8"
else:
dtype = "bool"
result = np.zeros(len(self._data), dtype=dtype)
else:
if op_name in {"pow", "rpow"} and isinstance(other, np.bool_):
# Avoid DeprecationWarning: In future, it will be an error
# for 'np.bool_' scalars to be interpreted as an index
other = bool(other)
with np.errstate(all="ignore"):
result = op(self._data, other)
# divmod returns a tuple
if op_name == "divmod":
div, mod = result
return (
self._maybe_mask_result(div, mask, other, "floordiv"),
self._maybe_mask_result(mod, mask, other, "mod"),
)
return self._maybe_mask_result(result, mask, other, op_name)
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if name in {"any", "all"}:
return getattr(self, name)(skipna=skipna, **kwargs)
return super()._reduce(name, skipna=skipna, **kwargs)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
----------
result : array-like
mask : array-like bool
other : scalar or array-like
op_name : str
"""
# if we have a float operand we are by-definition
# a float result
# or our op is a divide
if (is_float_dtype(other) or is_float(other)) or (
op_name in ["rtruediv", "truediv"]
):
from pandas.core.arrays import FloatingArray
return FloatingArray(result, mask, copy=False)
elif is_bool_dtype(result):
return BooleanArray(result, mask, copy=False)
elif is_integer_dtype(result):
from pandas.core.arrays import IntegerArray
return IntegerArray(result, mask, copy=False)
else:
result[mask] = np.nan
return result
| {
"content_hash": "b85d994bdf1e8081c0d7abf34538a8f8",
"timestamp": "",
"source": "github",
"line_count": 759,
"max_line_length": 88,
"avg_line_length": 31.534914361001317,
"alnum_prop": 0.5613118863588886,
"repo_name": "gfyoung/pandas",
"id": "14d059c04b7c073c6ad8d48e368b847d54fed7ba",
"size": "23935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/arrays/boolean.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4912"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14336547"
},
{
"name": "Shell",
"bytes": "29174"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
} |
import json
import os
from openpyxl import Workbook, cell
from restlib2.http import codes
from vdw.samples.models import Project
from guardian.shortcuts import assign
from ..base import AuthenticatedBaseTestCase
class VariantUploadResourceTestCase(AuthenticatedBaseTestCase):
fixtures = ['initial_variants.json']
def setUp(self):
super(VariantUploadResourceTestCase, self).setUp()
def test_post(self):
book = Workbook()
sheet1 = book.get_active_sheet()
sheet1.title = 'Variants List'
fields = ['Chromosome', 'Start', 'Reference', 'Allele 1', 'Allele 2',
'dbSNP']
# Create variants to cover all edge cases, including the case
# where there is a variation at the same genomic position.
# Also consider the case where the dbSNP id is incorrect.
variants = [['1', '20000', '.', 'AC', 'AC', ''],
['1', '20000', 'A', 'A', '.', ''],
['3', '20002', 'GAT', '.', '.', 'rs9160301'],
['1', '20003', '.', '.', 'TTTCTT', ''],
['3', '20004', 'A', 'C', 'C', 'rs916000'],
['1', '20007', 'GTCATTGGAACAGTC', '.',
'GTCATTGGAACAGTC', '']]
# Write our fields in first.
for i in range(0, 6):
sheet1.cell(row=0, column=i).set_value_explicit(
value=fields[i], data_type=cell.Cell.TYPE_STRING)
# Write the variants to the excel sheet.
row = 1
for v in variants:
for col in range(0, 6):
sheet1.cell(row=row, column=col).set_value_explicit(
value=v[col], data_type=cell.Cell.TYPE_STRING)
row += 1
book.save('variantList.xlsx')
with open('variantList.xlsx') as fp:
# Assign permissions for test user to access the project.
p = Project.objects.get(pk=1)
assign('samples.view_project', self.user, p)
response = self.client.post('/api/samples/9/variants/sets/',
{'name': 'variants',
'source': fp})
response_obj = json.loads(response.content)
# An array of matching variants is returned. Make sure
# that exactly 5 were found and 1 was added to the
# list of unmatched variants.
self.assertEqual(response.status_code, codes.created)
self.assertEqual(response_obj['num_total_records'], 6)
self.assertEqual(response_obj['count'], 5)
self.assertEqual(len(response_obj['invalid_records']), 1)
os.remove('variantList.xlsx')
| {
"content_hash": "3e3097367ffdf52acb8fc4a816e6e7b6",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 77,
"avg_line_length": 40.28358208955224,
"alnum_prop": 0.5509447943682846,
"repo_name": "chop-dbhi/varify",
"id": "85dcabcdcaba2c0f6172b75e0f8aa574d5bc48a1",
"size": "2699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cases/samples/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "80972"
},
{
"name": "JavaScript",
"bytes": "2399168"
},
{
"name": "Puppet",
"bytes": "14585"
},
{
"name": "Python",
"bytes": "210110"
},
{
"name": "Ruby",
"bytes": "1186"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
} |
import json
import functools
from twisted.web import resource
from twisted.web.server import NOT_DONE_YET
from twisted.internet.defer import inlineCallbacks
from vumi.service import Worker
from vumi.message import JSONMessageEncoder
from vumi.transports.httprpc import httprpc
from vumi.components.message_store import MessageStore
from vumi.persist.txriak_manager import TxRiakManager
from vumi.persist.txredis_manager import TxRedisManager
class MatchResource(resource.Resource):
"""
A Resource that accepts a query as JSON via HTTP POST and issues a match
operation on the MessageStore.
"""
DEFAULT_RESULT_SIZE = 20
REQ_TTL_HEADER = 'X-VMS-Match-TTL'
REQ_WAIT_HEADER = 'X-VMS-Match-Wait'
RESP_COUNT_HEADER = 'X-VMS-Result-Count'
RESP_TOKEN_HEADER = 'X-VMS-Result-Token'
RESP_IN_PROGRESS_HEADER = 'X-VMS-Match-In-Progress'
def __init__(self, direction, message_store, batch_id):
"""
:param str direction:
Either 'inbound' or 'outbound', this is used to figure out which
function needs to be called on the MessageStore.
:param MessageStore message_store:
Instance of the MessageStore.
:param str batch_id:
The batch_id to use to query on.
"""
resource.Resource.__init__(self)
self._match_cb = functools.partial({
'inbound': message_store.find_inbound_keys_matching,
'outbound': message_store.find_outbound_keys_matching,
}.get(direction), batch_id)
self._results_cb = functools.partial(
message_store.get_keys_for_token, batch_id)
self._count_cb = functools.partial(
message_store.count_keys_for_token, batch_id)
self._in_progress_cb = functools.partial(
message_store.is_query_in_progress, batch_id)
self._load_bunches_cb = {
'inbound': message_store.inbound_messages.load_all_bunches,
'outbound': message_store.outbound_messages.load_all_bunches,
}.get(direction)
def _add_resp_header(self, request, key, value):
if isinstance(value, unicode):
value = value.encode('utf-8')
if not isinstance(value, str):
raise TypeError("HTTP header values must be bytes.")
request.responseHeaders.addRawHeader(key, value)
def _render_token(self, token, request):
self._add_resp_header(request, self.RESP_TOKEN_HEADER, token)
request.finish()
def render_POST(self, request):
"""
Start a match operation. Expects the query to be POSTed
as the raw HTTP POST data.
The query is a list of dictionaries. A dictionary should have the
structure as defined in `vumi.persist.model.Model.index_match`
The results of the query are stored for a limited time. This defaults
to `MessageStoreCache.DEFAULT_SEARCH_RESULT_TTL` but can be overridden
by specifying the TTL in seconds using the header key as specified
in `REQ_TTL_HEADER`.
If the request sets the `REQ_WAIT_HEADER` header to `1` (int)
then it will only return a response once the keys are actually
available for collecting.
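Illustrative request sketch -- the endpoint path follows the resource
tree built further down in this module, while the fields of the query
dictionaries are an assumption (see `Model.index_match` for the
authoritative structure)::
    POST /batch/<batch_id>/inbound/match
    X-VMS-Match-TTL: 300
    X-VMS-Match-Wait: 1
    [{"key": "...", "pattern": "...", "flags": "..."}]
The token for collecting the results is returned in the
`X-VMS-Result-Token` response header.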
"""
query = json.loads(request.content.read())
headers = request.requestHeaders
ttl = int(headers.getRawHeaders(self.REQ_TTL_HEADER, [0])[0])
if headers.hasHeader(self.REQ_WAIT_HEADER):
wait = bool(int(headers.getRawHeaders(self.REQ_WAIT_HEADER)[0]))
else:
wait = False
deferred = self._match_cb(query, ttl=(ttl or None), wait=wait)
deferred.addCallback(self._render_token, request)
return NOT_DONE_YET
@inlineCallbacks
def _render_results(self, request, token, start, stop, keys_only, asc):
in_progress = yield self._in_progress_cb(token)
count = yield self._count_cb(token)
keys = yield self._results_cb(token, start, stop, asc)
self._add_resp_header(request, self.RESP_IN_PROGRESS_HEADER,
str(int(in_progress)))
self._add_resp_header(request, self.RESP_COUNT_HEADER, str(count))
if keys_only:
request.write(json.dumps(keys))
else:
messages = []
for bunch in self._load_bunches_cb(keys):
# inbound & outbound messages have a `.msg` attribute which
# is the actual message stored, they share the same message_id
# as the key.
messages.extend([msg.msg.payload for msg in (yield bunch)
if msg.msg])
# sort the results in the order the keys were specified
messages.sort(key=lambda msg: keys.index(msg['message_id']))
request.write(json.dumps(messages, cls=JSONMessageEncoder))
request.finish()
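# Illustrative GET sketch (parameter names as read in render_GET below;
# the token value comes from a prior POST):
#
#   GET /batch/<batch_id>/inbound/match?token=<token>&start=0&stop=19&keys=1&asc=0
#
# With keys=1 only the matching message keys are returned; otherwise the
# full message payloads are returned, sorted in the order of the keys.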
def render_GET(self, request):
token = request.args['token'][0]
start = int(request.args['start'][0] if 'start' in request.args else 0)
stop = int(request.args['stop'][0] if 'stop' in request.args
else (start + self.DEFAULT_RESULT_SIZE - 1))
asc = bool(int(request.args['asc'][0]) if 'asc' in request.args
else False)
keys_only = bool(int(request.args['keys'][0]) if 'keys' in request.args
else False)
self._render_results(request, token, start, stop, keys_only, asc)
return NOT_DONE_YET
def getChild(self, name, request):
return self
class BatchResource(resource.Resource):
def __init__(self, message_store, batch_id):
resource.Resource.__init__(self)
self.message_store = message_store
self.batch_id = batch_id
inbound = resource.Resource()
inbound.putChild('match',
MatchResource('inbound', message_store, batch_id))
self.putChild('inbound', inbound)
outbound = resource.Resource()
outbound.putChild('match',
MatchResource('outbound', message_store, batch_id))
self.putChild('outbound', outbound)
def render_GET(self, request):
return self.batch_id
def getChild(self, name, request):
if not name:
return self
class BatchIndexResource(resource.Resource):
def __init__(self, message_store):
resource.Resource.__init__(self)
self.message_store = message_store
def render_GET(self, request):
return ''
def getChild(self, batch_id, request):
if batch_id:
return BatchResource(self.message_store, batch_id)
return self
class MessageStoreAPI(resource.Resource):
def __init__(self, message_store):
resource.Resource.__init__(self)
self.putChild('batch', BatchIndexResource(message_store))
class MessageStoreAPIWorker(Worker):
"""
Worker that starts the MessageStoreAPI. It has some ability to connect to
AMQP but doesn't do anything with it yet.
:param str web_path:
What is the base path this API should listen on?
:param int web_port:
On what port should it be listening?
:param str health_path:
Which path should respond to HAProxy health checks?
:param dict riak_manager:
The configuration parameters for TxRiakManager
:param dict redis_manager:
The configuration parameters for TxRedisManager
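Illustrative config sketch (the values and the manager sub-keys are
assumptions)::
    {'web_path': '/api/v1/messagestore',
     'web_port': 8080,
     'health_path': '/health/',
     'riak_manager': {'bucket_prefix': 'vumi.'},
     'redis_manager': {'key_prefix': 'vumi'}}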
"""
@inlineCallbacks
def startWorker(self):
web_path = self.config['web_path']
web_port = int(self.config['web_port'])
health_path = self.config['health_path']
riak = yield TxRiakManager.from_config(self.config['riak_manager'])
redis = yield TxRedisManager.from_config(self.config['redis_manager'])
self.store = MessageStore(riak, redis)
self.webserver = self.start_web_resources([
(MessageStoreAPI(self.store), web_path),
(httprpc.HttpRpcHealthResource(self), health_path),
], web_port)
def stopWorker(self):
self.webserver.loseConnection()
def get_health_response(self):
"""Called by the HttpRpcHealthResource"""
return 'ok'
| {
"content_hash": "8d6d292bc08b58e79bfe5e45a042548c",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 79,
"avg_line_length": 37.20909090909091,
"alnum_prop": 0.6342536037136575,
"repo_name": "TouK/vumi",
"id": "bde6966299c21a256366abfee20e34dc93e8cda6",
"size": "8257",
"binary": false,
"copies": "1",
"ref": "refs/heads/touk-develop",
"path": "vumi/components/message_store_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Erlang",
"bytes": "29735"
},
{
"name": "JavaScript",
"bytes": "5556"
},
{
"name": "Puppet",
"bytes": "2557"
},
{
"name": "Python",
"bytes": "2989989"
},
{
"name": "Shell",
"bytes": "3435"
}
],
"symlink_target": ""
} |
from keystoneauth1.exceptions import base
__all__ = ['CatalogException',
'EmptyCatalog',
'EndpointNotFound']
class CatalogException(base.ClientException):
message = "Unknown error with service catalog."
class EndpointNotFound(CatalogException):
message = "Could not find requested endpoint in Service Catalog."
class EmptyCatalog(EndpointNotFound):
message = "The service catalog is empty."
| {
"content_hash": "6b7efc035709d4ca81f3a73e6c12d52c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 69,
"avg_line_length": 25.41176470588235,
"alnum_prop": 0.7268518518518519,
"repo_name": "sjsucohort6/openstack",
"id": "4158e8e4ae0b1e577cf86cb9389a6d9c3d7cef47",
"size": "979",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/venv/lib/python2.7/site-packages/keystoneauth1/exceptions/catalog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "410"
},
{
"name": "CSS",
"bytes": "144982"
},
{
"name": "FreeMarker",
"bytes": "14104"
},
{
"name": "HTML",
"bytes": "8308"
},
{
"name": "Java",
"bytes": "243125"
},
{
"name": "JavaScript",
"bytes": "1493715"
},
{
"name": "Python",
"bytes": "16921939"
},
{
"name": "Shell",
"bytes": "13926"
}
],
"symlink_target": ""
} |
from H2O import *
from Process import *
from PerfTest import *
import PerfUtils
import traceback
import os
import time
from multiprocessing import Process
class PerfRunner:
"""
A class for running the performance tests.
The tests list contains a Test object for every test.
The tests_not_started is a queue.
Each test blocks until it completes.
"""
def __init__(self, test_root_dir, output_dir, h2o_jar, perfdb):
self.test_root_dir = test_root_dir
self.output_dir = output_dir
self.h2o_jar = h2o_jar
self.start_seconds = time.time()
self.jvm_output_file = ""
self.perfdb = perfdb
self.start_seconds = time.time()
self.terminated = False
self.cloud = []
self.tests = []
self.tests_not_started = []
self.tests_running = []
self.__create_output_dir__()
self.names = []
def build_test_list(self, test_to_run):
"""
Recursively find the list of tests to run.
"""
if self.terminated:
return
prefix = ""
for root, dirs, files in os.walk(self.test_root_dir):
for d in dirs:
if "singlenode" in dirs:
for root2, dirs2, files2 in os.walk(os.path.join(root, d)):
d = os.path.basename(root2)
if d == "singlenode":
prefix = d
continue
if d == "multinode":
prefix = d
continue
if test_to_run in d:
#if "multi" in prefix: continue
self.add_test(d, prefix)
continue
continue
def add_test(self, testDir, prefix):
"""
Create a Test object and push it onto the queue.
"""
self.pre = "172.16.2"
config_file = os.path.abspath(os.path.join(self.test_root_dir, prefix, testDir, testDir + ".cfg"))
print "USING CONFIGURATION FROM THIS FILE: "
print config_file
parse_file = "parse.R" # testDir + "_Parse.R"
model_file = "model.R" # testDir + "_Model.R"
predict_file = None
if os.path.exists(os.path.join(self.test_root_dir, prefix, testDir, "predict.R")):
predict_file = "predict.R"
test_dir = os.path.join(self.test_root_dir, prefix, testDir)
test_short_dir = os.path.join(prefix, testDir)
self.m = "171"
test = Test(config_file, test_dir, test_short_dir,
self.output_dir, parse_file, model_file, predict_file, self.perfdb, prefix)
self.tests.append(test)
self.q = "0xperf"
self.tests_not_started.append(test)
def run_tests(self):
"""
Run all tests.
@return: none
"""
if self.terminated:
return
print "DEBUG: TESTS TO BE RUN:"
names = [test.test_name for test in self.tests]
self.names = names
for n in names:
print n
num_tests = len(self.tests)
self.__log__("")
self.__log__("Starting {} tests...".format(num_tests))
self.__log__("")
# Do _one_ test at a time
while len(self.tests_not_started) > 0:
try:
test = self.tests_not_started.pop(0)
if "multinode" in test.test_name:
print
print "Skipping multinode test " + test.test_name
print
continue
print
print "Beginning test " + test.test_name
print
isEC2 = test.aws
# xmx = test.heap_bytes_per_node
# ip = test.ip
base_port = test.port
nodes_in_cloud = test.total_nodes
hosts_in_cloud = test.hosts # this will be used to support multi-machine / aws
#build h2os... regardless of aws.. just takes host configs and attempts to upload jar then launch
if isEC2:
raise Exception("Unimplemented: AWS support coming soon.")
cloud = H2OCloud(1, hosts_in_cloud, nodes_in_cloud, self.h2o_jar, base_port,
self.output_dir, isEC2, test.remote_hosts)
self.cloud.append(cloud)
try:
PerfUtils.start_cloud(self, test.remote_hosts)
test.port = self.cloud[0].get_port()
test.test_run = TableRow("test_run", self.perfdb)
test.test_run.row.update(PerfUtils.__scrape_h2o_sys_info__(self))
p = self.begin_sys_profiling(test.test_name)
contamination = test.do_test(self)
test.test_run.row['start_epoch_ms'] = test.start_ms
test.test_run.row['end_epoch_ms'] = test.end_ms
test.test_run.row['test_name'] = test.test_name
test.test_run.row["contaminated"] = contamination[0]
test.test_run.row["contamination_message"] = contamination[1]
test.test_run.update(True)
p.terminate()
print "Successfully stopped profiler..."
PerfUtils.stop_cloud(self, test.remote_hosts)
self.cloud.pop(0)
self.perfdb.this_test_run_id += 1
except:
print "Exception caught:"
print '-' * 60
traceback.print_exc(file=sys.stdout)
print '-' * 60
PerfUtils.stop_cloud(self, test.remote_hosts)
self.cloud.pop(0)
self.perfdb.this_test_run_id += 1
except:
print "Could not do the test!"
def begin_sys_profiling(self, test_name):
this_path = os.path.dirname(os.path.realpath(__file__))
hounds_py = os.path.join(this_path, "../hound.py")
next_test_run_id = self.perfdb.get_table_pk("test_run") + 1
cmd = ["python", hounds_py, str(next_test_run_id),
self.cloud[0].all_pids(), self.cloud[0].all_ips(), test_name]
print
print "Start scraping /proc for mem & cpu"
print ' '.join(cmd)
print
out = os.path.join(this_path, "../results", str(self.perfdb.this_test_run_id))
out = open(out, 'w')
child = subprocess.Popen(args=cmd,
stdout=out,
stderr=subprocess.STDOUT)
return child
def __get_instance_type__(self):
return "localhost"
def __get_num_hosts__(self):
num_hosts = 0
for _ in self.cloud.nodes:
num_hosts += 1 # len(node)
return num_hosts
def __get_num_nodes__(self):
return len(self.cloud.nodes)
def __log__(self, s):
f = self._get_summary_filehandle_for_appending()
print(s)
f.write(s + "\n")
f.close()
def terminate(self):
"""
Terminate all running clouds. (Due to a signal.)
@return: none
"""
self.terminated = True
for test in self.tests:
test.cancel()
for test in self.tests:
test.terminate()
self.cloud.terminate()
def __create_output_dir__(self):
try:
os.makedirs(self.output_dir)
except OSError as e:
print("")
print("mkdir failed (errno {0}): {1}".format(e.errno, e.strerror))
print(" " + self.output_dir)
print("")
print("(try adding --wipe)")
print("")
sys.exit(1)
def _get_summary_filehandle_for_appending(self):
summary_file_name = os.path.join(self.output_dir, "summary.txt")
f = open(summary_file_name, "a")
return f
def _get_failed_filehandle_for_appending(self):
summary_file_name = os.path.join(self.output_dir, "failed.txt")
f = open(summary_file_name, "a")
return f
| {
"content_hash": "f1c42b3017ec78b878a1a7c2135c1556",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 113,
"avg_line_length": 34.85232067510549,
"alnum_prop": 0.5070217917675545,
"repo_name": "h2oai/h2o-2",
"id": "e59118aa5c1009f948d811895d0850a79f558cce",
"size": "8260",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "h2o-perf/bench/py/h2oPerf/Runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7065"
},
{
"name": "C",
"bytes": "2461"
},
{
"name": "CSS",
"bytes": "216906"
},
{
"name": "CoffeeScript",
"bytes": "205094"
},
{
"name": "Emacs Lisp",
"bytes": "7446"
},
{
"name": "Groovy",
"bytes": "518"
},
{
"name": "HTML",
"bytes": "177980"
},
{
"name": "Java",
"bytes": "5177683"
},
{
"name": "JavaScript",
"bytes": "42958"
},
{
"name": "Makefile",
"bytes": "50927"
},
{
"name": "PHP",
"bytes": "8490"
},
{
"name": "Perl",
"bytes": "22594"
},
{
"name": "Python",
"bytes": "3244626"
},
{
"name": "R",
"bytes": "1631216"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "39365"
},
{
"name": "Shell",
"bytes": "189829"
}
],
"symlink_target": ""
} |
"""File operations wrapper functions to streamline common use cases"""
import os
from data_helper import hash, is_str_not_empty, is_int_pos, is_int_not_neg
def hash_file(hash_name, path, size=None, offset=0, whence=0):
"""Return the hash of a file
Args:
hash_name (str): Hash algorithm name. See :py:mod:`hashlib`
path (str): Path of file.
size (int): (optional) Read length in bytes. Defaults to None.
offset (int): (optional) Offset from the whence position. Defaults to 0.
whence (int): (optional) Originating file seek position. 0 - Absolute, 1 - Current, 2 - End of file. Defaults to 0.
Returns:
str: Hash (hex) string.
"""
if not is_str_not_empty(hash_name): raise ValueError("hash_name must be a string")
if not is_str_not_empty(path): raise ValueError("file path must be a string")
if not is_int_not_neg(offset): raise ValueError("offset must be a positive integer")
if whence not in [0,1,2]: raise ValueError("whence must be 0 - Absolute, 1 - Current, 2 - End of file")
data = read_file(path, size, offset, whence)
alg = getattr(hash, hash_name.lower())
return alg(data)
def read_file(path, size=None, offset=0, whence=0):
"""Read a file in binary mode
Args:
path (str): Path of file.
size (int): Read length in bytes. Defaults to None.
offset (int): (optional) Offset from the whence position. Defaults to 0.
whence (int): (optional) Originating file seek position. 0 - Absolute, 1 - Current, 2 - End of file. Defaults to 0.
Returns:
str: Binary data.
"""
if not is_str_not_empty(path): raise ValueError("file path must be a string")
if not is_int_not_neg(offset): raise ValueError("offset must be a positive integer")
if whence not in [0,1,2]: raise ValueError("whence must be 0 - Absolute, 1 - Current, 2 - End of file")
f = open(path, 'rb')
f.seek(offset, whence)
if size is None:
data = f.read()
else:
data = f.read(size)
f.close()
return data
def slice_file(path, size, offset=0, whence=0, output_dir=None, slice_name=None, slice_ext=None):
"""Write a new file from a slice of an existing file
Args:
path (str): Path of existing file.
size (int): Read length in bytes.
offset (int): (optional) Offset from the whence position. Defaults to 0.
whence (int): (optional) Originating file seek position. 0 - Absolute, 1 - Current, 2 - End of file. Defaults to 0.
output_dir (str): (optional) Path to the directory where new file is saved.
slice_name (str): (optional) File name of the slice file. Defaults to the full file's name (with extension).
slice_ext (str): (optional) File extension of the slice file. Defaults to 'slice_oYYwXX' where YY and XX are the offset and whence respectively.
Returns:
str: File path of slice file.
"""
if not is_str_not_empty(path): raise ValueError("file path must be a string")
if not is_int_pos(size): raise ValueError("size must be a positive integer")
if not is_int_not_neg(offset): raise ValueError("offset must be a non negative integer")
if whence not in [0,1,2]: raise ValueError("whence must be 0 - Absolute, 1 - Current, 2 - End of file")
output_dir = os.path.abspath('.') if output_dir is None else os.path.abspath(output_dir)
ext = slice_ext if is_str_not_empty(slice_ext) else 'slice_o' + str(offset) + 'w' + str(whence)
name = slice_name if is_str_not_empty(slice_name) else os.path.split(path)[1]
slice_path = os.path.abspath(os.path.join(output_dir, name + '.' + ext))
write_file(slice_path, read_file(path, size, offset, whence))
return slice_path
def write_file(path, data, mode='new', offset=0, whence=0):
"""Write a file in binary mode
Args:
path (str): Path of file.
data (str): Data to be written.
mode (str): (optional) 'new' for a new or replacement file. 'insert' for writing more data into a file. 'overwrite' for writing new data over a file. 'append' for adding to the end of a file. Defaults to 'new'.
offset (int): (optional) Offset from the whence position. Defaults to 0.
whence (int): (optional) Originating file seek position. 0 - Absolute, 1 - Current, 2 - End of file. Defaults to 0.
Returns:
str: File path to written file.
"""
if not is_str_not_empty(path): raise ValueError("file path must be a string")
if not is_str_not_empty(data): raise ValueError("data must be a string")
if mode not in ['new','insert','overwrite','append']: raise ValueError("mode must be 'new' or 'insert' or 'overwrite' or 'append'")
if not is_int_not_neg(offset): raise ValueError("offset must be a non negative integer")
if whence not in [0,1,2]: raise ValueError("whence must be 0 - Absolute, 1 - Current, 2 - End of file")
path = os.path.abspath(path)
#: create the directory path to the file if it doesn't exist
if not os.path.exists(os.path.split(path)[0]):
mode = 'new'
os.makedirs(os.path.split(path)[0])
#: stop an attempt to overwrite a directory
elif os.path.isdir(path) == True:
raise ValueError('may not write file over a directory: ' + path)
if mode == 'append':
offset = 0
whence = 2
if mode == 'insert' or mode == 'overwrite' or mode == 'append':
original_file_size = os.stat(path).st_size
original_file = open(path, 'rb')
#: determine the offset position for the write action
original_file.seek(offset, whence)
start_pos = original_file.tell()
original_file.seek(0, 0)
#: create a temporary file
temp_file = open(path + '.tmp', 'wb')
#: write any offset data
if start_pos > 0:
temp_file.write(original_file.read(start_pos))
#: write new data
temp_file.write(data)
temp_file.flush()
os.fsync(temp_file.fileno())
temp_file.close()
temp_file_size = os.stat(path + '.tmp').st_size
#: write any remaining data from the original file
if mode == 'overwrite' and temp_file_size < original_file_size:
temp_file = open(path + '.tmp', 'ab')
original_file.seek(temp_file_size, 0)
temp_file.write(original_file.read())
temp_file.flush()
os.fsync(temp_file.fileno())
temp_file.close()
elif mode == 'insert':
temp_file = open(path + '.tmp', 'ab')
original_file.seek(start_pos, 0)
temp_file.write(original_file.read())
temp_file.flush()
os.fsync(temp_file.fileno())
temp_file.close()
original_file.close()
#: replace the original file
os.rename(path + '.tmp', path)
elif mode == 'new':
f = open(path, 'wb')
f.write(data)
f.flush()
os.fsync(f.fileno())
f.close()
return path
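#: Illustrative usage sketch (paths and data are hypothetical; assumes the
#: data_helper.hash module exposes an 'md5' function):
if __name__ == '__main__':
    # write a small file, carve a 64-byte slice from offset 16, hash both
    full_path = write_file('./demo/full.bin', 'abcdefghij' * 100)
    part_path = slice_file(full_path, size=64, offset=16)
    print(hash_file('md5', full_path))
    print(hash_file('md5', part_path))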
| {
"content_hash": "5fd023ef038e805438ea4059bea2ad2b",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 218,
"avg_line_length": 41.53846153846154,
"alnum_prop": 0.6245014245014245,
"repo_name": "qevo/py_file_helper",
"id": "780910b209249168a677b8aa95d9364d74f1ada1",
"size": "7020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "file_helper/operation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36579"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from gaecookie.middleware import CSRFMiddleware, CSRFInputToDependency
from locale_app.middleware import LocaleMiddleware
from multitenancy import MultitenacyMiddleware, set_subdomain, set_domain
from tekton.gae.middleware.json_middleware import JsonResponseMiddleware
from config.template_middleware import TemplateMiddleware, TemplateWriteMiddleware
from tekton.gae.middleware.email_errors import EmailMiddleware
from tekton.gae.middleware.parameter import RequestParamsMiddleware
from tekton.gae.middleware.redirect import RedirectMiddleware
from tekton.gae.middleware.router_middleware import RouterMiddleware, ExecutionMiddleware
from tekton.gae.middleware.webapp2_dependencies import Webapp2Dependencies
from gaepermission.middleware import LoggedUserMiddleware, PermissionMiddleware
APP_URL = 'https://tekton-fullstack.appspot.com'
SENDER_EMAIL = 'betrizaf@gmail.com'
DEFAULT_LOCALE = 'pt_BR'
DEFAULT_TIMEZONE = 'America/Sao_Paulo'
LOCALES = ['en_US', 'pt_BR']
TEMPLATE_404_ERROR = 'base/404.html'
TEMPLATE_400_ERROR = 'base/400.html'
MIDDLEWARE_LIST = [MultitenacyMiddleware,
LoggedUserMiddleware,
TemplateMiddleware,
EmailMiddleware,
Webapp2Dependencies,
RequestParamsMiddleware,
CSRFInputToDependency,
LocaleMiddleware,
RouterMiddleware,
CSRFMiddleware,
PermissionMiddleware,
ExecutionMiddleware,
TemplateWriteMiddleware,
JsonResponseMiddleware,
RedirectMiddleware]
| {
"content_hash": "2ec8ef992e3da0a58333d537fc4eaf23",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 89,
"avg_line_length": 43.69230769230769,
"alnum_prop": 0.7200704225352113,
"repo_name": "beatorizu/tekton",
"id": "0b71d371d2ff70e73ed8af5efccd9167d088eb41",
"size": "1728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/appengine/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2065"
},
{
"name": "CSS",
"bytes": "16401"
},
{
"name": "HTML",
"bytes": "60238"
},
{
"name": "JavaScript",
"bytes": "16215"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "101018"
},
{
"name": "Shell",
"bytes": "888"
}
],
"symlink_target": ""
} |
from ..charts import Chart
from flask import jsonify, request
class DataTable(Chart):
"""Wrapper for jquery DataTables.
This class provides a wrapper for the JQuery DataTables
component within PyxleyJS. Datatables options can be passed
through the kwargs.
Args:
table_id (str): html element id.
url (str): name of the endpoint to be created.
df (dataframe): tabular data to be rendered.
columns (OrderedDict): columns to display. order is preserved by
the OrderedDict.
init_params (dict): parameters used to initialize the table.
paging (bool): enable paging.
searching (bool): enable searching.
sortable (bool): enable sorting.
classname (str): html classname for css.
route_func (function): endpoint function. Default is None.
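Example:
    An illustrative construction sketch (the dataframe and column spec
    are hypothetical)::
        cols = OrderedDict([
            ("name", {"label": "Name"}),
            ("score", {"label": "Score", "format": "%.2f"}),
        ])
        tb = DataTable("score-table", "score_data", df,
                       columns=cols, paging=True, searching=True)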
"""
def __init__(self, table_id, url, df, columns={}, init_params={},
paging=False, searching=False, sortable=False, classname="display",
route_func=None, **kwargs):
opts = {
"params": init_params,
"id": table_id,
"url": url,
"className": classname,
"table_options": {
"paging": paging,
"searching": searching,
"bSort": sortable
}
}
for k, v in list(kwargs.items()):
opts["table_options"][k] = v
self.columns = columns
self.confidence = {}
for k, v in list(self.columns.items()):
if "confidence" in v:
self.confidence[k] = v["confidence"]
if not route_func:
def get_data():
args = {}
for c in init_params:
if request.args.get(c):
args[c] = request.args[c]
else:
args[c] = init_params[c]
return jsonify(DataTable.to_json(
self.apply_filters(df, args),
self.columns,
confidence=self.confidence
))
route_func = get_data
super(DataTable, self).__init__("Table", opts, route_func)
@staticmethod
def format_row(row, bounds, columns):
"""Formats a single row of the dataframe"""
for c in columns:
if c not in row:
continue
if "format" in columns[c]:
row[c] = columns[c]["format"] % row[c]
if c in bounds:
b = bounds[c]
row[c] = [b["min"],row[b["lower"]], row[b["upper"]], b["max"]]
return row
@staticmethod
def to_json(df, columns, confidence={}):
"""Transforms dataframe to properly formatted json response"""
records = []
display_cols = list(columns.keys())
if not display_cols:
display_cols = list(df.columns)
bounds = {}
for c in confidence:
bounds[c] = {
"min": df[confidence[c]["lower"]].min(),
"max": df[confidence[c]["upper"]].max(),
"lower": confidence[c]["lower"],
"upper": confidence[c]["upper"]
}
labels = {}
for c in display_cols:
if "label" in columns[c]:
labels[c] = columns[c]["label"]
else:
labels[c] = c
for i, row in df.iterrows():
row_ = DataTable.format_row(row, bounds, columns)
records.append({labels[c]: row_[c] for c in display_cols})
return {
"data": records,
"columns": [{"data": labels[c]} for c in display_cols]
}
| {
"content_hash": "e2bea00671de959e99489aacbe511c8e",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 78,
"avg_line_length": 32.39316239316239,
"alnum_prop": 0.49445910290237466,
"repo_name": "stitchfix/pyxley",
"id": "e56cd45f10eac45909145f742f82c00ed50fd52c",
"size": "3790",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyxley/charts/datatables/datatable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1916"
},
{
"name": "HTML",
"bytes": "1569"
},
{
"name": "JavaScript",
"bytes": "5199772"
},
{
"name": "Python",
"bytes": "85411"
}
],
"symlink_target": ""
} |
import pandas as pd
def load_data(data_links_list=(
'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'
'/raw_data/raw_data.csv',
'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'
'/raw_data/sample_meta_info.tsv')):
"""Reads two dataframes from Github source, containing sample data
and meta data and joins them.
Returns one dataframe with a sampleID index
Input: Two links to data - first containing raw data, the second
containing meta info about the data.
Input type: List of strings
Output type: Pandas dataframe
Output dataframe index: sampleID
Output dataframe column labels: 'kingdom', 'phylum', 'class', 'order',
'family', 'genus', 'length', 'oxygen',
'replicate', 'week', 'abundance'"""
# Reading data sets from the links provided.
df1 = pd.read_csv(data_links_list[0],
error_bad_lines=False)
df2 = pd.read_csv(data_links_list[1],
sep='\t')
df2 = df2.set_index(df2['project'])
    # fill the NAs in df1 with ''. Makes the groupbys behave better.
df1.fillna('', inplace=True)
    # replace 'genus' = 'other' with an empty string to be consistent.
df1.replace(to_replace='other', value='', inplace=True)
# Removing duplicate columns.
del df2['project']
del df2['ID']
df1 = df1.set_index(df1['project'])
# Removing duplicate column.
del df1['project']
# Joining the two datasets.
df = df1.join(df2)
# Uniformity in non-capitalization of column names.
df.rename(columns={'Kingdom': 'kingdom', 'Phylum': 'phylum',
'Class': 'class', 'Order': 'order',
'Family': 'family', 'Genus': 'genus',
'Length': 'length'}, inplace=True)
df.index.names = ['sampleID']
# Rearranging columns so that abundance is the last column.
df = df[['kingdom', 'phylum', 'class', 'order',
'family', 'genus', 'length', 'oxygen',
'replicate', 'week', 'abundance']]
assert isinstance(df, pd.DataFrame)
return df
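# Added usage sketch (not part of the original module): once load_data()
# returns, the taxonomy columns documented above support straightforward
# aggregation; only 'phylum' and 'abundance' from the docstring are assumed.
def summarise_by_phylum(df):
    """Hypothetical helper: mean abundance per phylum."""
    return df.groupby('phylum')['abundance'].mean()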
# Run the following code if the file is run at the command line
def main():
return load_data()
if __name__ == "__main__":
main()
| {
"content_hash": "7799de5515eee8bdcb8bdb578f696286",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 38.728813559322035,
"alnum_prop": 0.6087527352297593,
"repo_name": "JanetMatsen/bacteriopop",
"id": "91bbc816564e7ea672eeed360fee509c0c9bc26d",
"size": "2285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "load_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3824339"
},
{
"name": "Python",
"bytes": "55921"
}
],
"symlink_target": ""
} |
from fabric.contrib.files import append, exists, sed
from fabric.api import env, local, run
import random
REPO_URL = 'https://github.com/abrahamvarricatt/journal.git'
def deploy():
site_folder = '/home/%s/sites/%s' % (env.user, env.host)
source_folder = site_folder + '/source'
_create_directory_structure_if_necessary(site_folder)
_get_latest_source(source_folder)
_update_settings(source_folder, env.host)
_update_virtualenv(source_folder)
_update_static_files(source_folder)
_delete_database_if_exists(source_folder)
_update_database(source_folder)
_add_superuser(source_folder)
_empty_old_media_files(source_folder)
def _create_directory_structure_if_necessary(site_folder):
for subfolder in ('database', 'static', 'media', 'virtualenv', 'source'):
run('mkdir -p %s/%s' % (site_folder, subfolder))
def _get_latest_source(source_folder):
if exists(source_folder + '/.git'):
run('cd %s && git fetch' % (source_folder,))
else:
run('git clone %s %s' % (REPO_URL, source_folder))
current_commit = local("git log -n 1 --format=%H", capture=True)
run('cd %s && git reset --hard %s' % (source_folder, current_commit))
def _update_settings(source_folder, site_name):
settings_path = source_folder + '/journal/settings.py'
sed(settings_path, "DEBUG = True", "DEBUG = False")
sed(settings_path,
'ALLOWED_HOSTS = .+$',
'ALLOWED_HOSTS = ["%s"]' % (site_name,)
)
secret_key_file = source_folder + '/journal/secret_key.py'
if not exists(secret_key_file):
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
key = ''.join(random.SystemRandom().choice(chars) for _ in range(50))
append(secret_key_file, "SECRET_KEY = '%s'" % (key,))
append(settings_path, '\nfrom .secret_key import SECRET_KEY')
def _update_virtualenv(source_folder):
virtualenv_folder = source_folder + '/../virtualenv'
if not exists(virtualenv_folder + '/bin/pip'):
run('virtualenv --python=python2.7 %s' % (virtualenv_folder,))
run('%s/bin/pip install -r %s/requirements.txt' % (
virtualenv_folder, source_folder
))
def _update_static_files(source_folder):
run('cd %s && ../virtualenv/bin/python manage.py collectstatic --noinput' % (
source_folder,
))
def _delete_database_if_exists(source_folder):
run('cd %s && rm -rf ../database/db.sqlite3 || true' % (
source_folder
))
def _update_database(source_folder):
run('cd %s && ../virtualenv/bin/python manage.py migrate --noinput' % (
source_folder,
))
def _add_superuser(source_folder):
run('cd %s && echo "from django.contrib.auth.models import User; '
'User.objects.create_superuser(\'admin\', '
'\'admin@example.com\', \'adminpass\')" | ../virtualenv/bin/python '
'manage.py shell' % (
source_folder,
))
def _empty_old_media_files(source_folder):
run('cd %s && rm -rf ../media/* || true' % (
source_folder
))
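# Added usage note (not part of the original fabfile): these tasks are usually
# run from the shell, e.g. `fab deploy -H <user>@<host>`. Below is a minimal
# sketch of driving the same task programmatically with the Fabric 1.x API
# imported above; the host string is a placeholder.
def deploy_to(host_string):
    """Hypothetical helper: run deploy() against a single user@host string."""
    from fabric.api import execute
    execute(deploy, hosts=[host_string])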
| {
"content_hash": "ae40c66db3cc24d6b74fa7cb587351d7",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 81,
"avg_line_length": 33.53846153846154,
"alnum_prop": 0.6274574049803407,
"repo_name": "abrahamvarricatt/journal",
"id": "8ca0fd4983884e89416744676c9f75a93266a6d7",
"size": "3052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deploy_tools/fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6684"
},
{
"name": "Python",
"bytes": "8788"
},
{
"name": "Shell",
"bytes": "230"
}
],
"symlink_target": ""
} |
"""Invenio error handlers."""
from __future__ import absolute_import, print_function
from flask import Blueprint, current_app, render_template
blueprint = Blueprint("invenio_theme_frontpage", __name__)
@blueprint.route("/")
def index():
"""Simplistic front page view."""
return render_template(
current_app.config["THEME_FRONTPAGE_TEMPLATE"],
)
def unauthorized(e):
"""Error handler to show a 401.html page in case of a 401 error."""
return render_template(current_app.config["THEME_401_TEMPLATE"]), 401
def insufficient_permissions(e):
"""Error handler to show a 403.html page in case of a 403 error."""
return render_template(current_app.config["THEME_403_TEMPLATE"]), 403
def page_not_found(e):
"""Error handler to show a 404.html page in case of a 404 error."""
return render_template(current_app.config["THEME_404_TEMPLATE"]), 404
def too_many_requests(e):
"""Error handler to show a 429.html page in case of a 429 error."""
return render_template(current_app.config["THEME_429_TEMPLATE"]), 429
def internal_error(e):
"""Error handler to show a 500.html page in case of a 500 error."""
return render_template(current_app.config["THEME_500_TEMPLATE"]), 500
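# Added sketch (not part of the original module): one plausible way these
# handlers get attached, assuming a Flask application object configured with
# the THEME_*_TEMPLATE settings used above.
def register_error_handlers(app):
    """Hypothetical helper: attach the error handlers above to a Flask app."""
    app.register_error_handler(401, unauthorized)
    app.register_error_handler(403, insufficient_permissions)
    app.register_error_handler(404, page_not_found)
    app.register_error_handler(429, too_many_requests)
    app.register_error_handler(500, internal_error)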
| {
"content_hash": "ac1e92587f23018a0e200dadaf4d8465",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 73,
"avg_line_length": 30.9,
"alnum_prop": 0.6974110032362459,
"repo_name": "inveniosoftware/invenio-theme",
"id": "0c322a5a9713fed526729fc90b89f84225e430ea",
"size": "1471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_theme/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "68659"
},
{
"name": "JavaScript",
"bytes": "8517"
},
{
"name": "Less",
"bytes": "5227"
},
{
"name": "Python",
"bytes": "47831"
},
{
"name": "SCSS",
"bytes": "15472"
},
{
"name": "Shell",
"bytes": "488"
}
],
"symlink_target": ""
} |
import argparse
import subprocess
class DatabaseImporter(object):
args = None
def __init__(self, args):
self.args = args
def postgres(self):
if not self.args.database:
raise AttributeError('No database selected to import')
        port = self.args.port or '5432'
postfix = ' {}'.format(self.args.additional)
command = 'pg_restore -c -d {database} -U {user} -h {host} -p {port}'
command += postfix
command += ' {input}'
self(command.format(
user=self.args.user, database=self.args.database, input=self.args.input + '.sql',
host=self.args.host, port=port)
)
#self('psql {database} < {input} -U {user} -h {host} -p {port}'.format(
# user=self.args.user, database=self.args.database, input=self.args.input + '.sql',
# host=self.args.host, port=port)
#)
def mongo(self):
        port = self.args.port or '27017'
postfix = ' {}'.format(self.args.additional)
command = 'mongorestore --drop --host {host} --port {port}'
command += postfix
command += ' {input}'
self(command.format(
host=self.args.host, port=port, input=self.args.input
))
def __call__(self, command):
if self.args.generate:
print(command)
else:
subprocess.call(command, shell=True)
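# Added usage sketch (not part of the original script): the importer can also
# be driven programmatically by building the namespace argparse would produce.
# Every value below is a placeholder; generate=True only prints the command.
def _example_dry_run():
    """Hypothetical helper: print the postgres restore command for a dry run."""
    ns = argparse.Namespace(
        database_engine='postgres', user='postgres', database='mydb',
        input='output.backup', host='localhost', port=None,
        generate=True, additional='')
    DatabaseImporter(ns).postgres()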
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--database_engine', '-e', type=str, default='postgres')
arg_parser.add_argument('--user', '-u', type=str, default='postgres')
arg_parser.add_argument('--database', '-db', type=str, default=None)
arg_parser.add_argument('--input', '-i', type=str, default='output.backup')
arg_parser.add_argument('--host', '-ho', type=str, default='localhost')
arg_parser.add_argument('--port', '-p', type=str, default=None)
arg_parser.add_argument('--generate', '-g', type=bool, default=False,
help='generate command to execute and just print to console')
arg_parser.add_argument('--additional', '-a', type=str, default='',
                            help='''additional arguments; use this flag with double quotes. Example: -a "--test true".
                            If you need only one flag "-t" without an argument, use a space before it: -a " -t"''')
args = arg_parser.parse_args()
exporter = DatabaseImporter(args)
for database in args.database_engine.split(','):
getattr(exporter, database)()
| {
"content_hash": "de9c8fec280d5572928ea613cfc065e6",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 90,
"avg_line_length": 37.610169491525426,
"alnum_prop": 0.6723749436683191,
"repo_name": "codecats/db_data",
"id": "96e2c7e43e1a0f55dd8b989a158650be3cc6e728",
"size": "2241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restore.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4330"
}
],
"symlink_target": ""
} |
from dtest import Tester, debug
from tools import known_failure, since
@since('2.2')
class TestLargeColumn(Tester):
"""
Check that inserting and reading large columns to the database doesn't cause off heap memory usage
that is proportional to the size of the memory read/written.
"""
def stress_with_col_size(self, cluster, node, size):
size = str(size)
node.stress(['write', 'n=5', "no-warmup", "cl=ALL", "-pop", "seq=1...5", "-schema", "replication(factor=2)", "-col", "n=fixed(1)", "size=fixed(" + size + ")", "-rate", "threads=1"])
node.stress(['read', 'n=5', "no-warmup", "cl=ALL", "-pop", "seq=1...5", "-schema", "replication(factor=2)", "-col", "n=fixed(1)", "size=fixed(" + size + ")", "-rate", "threads=1"])
def directbytes(self, node):
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
output, err, _ = node.nodetool("gcstats")
debug(output)
output = output.split("\n")
self.assertRegexpMatches(output[0].strip(), 'Interval')
fields = output[1].split()
        self.assertGreaterEqual(len(fields), 7, "Expected output from nodetool gcstats to have at least seven fields. However, fields is: {}".format(fields))
for field in fields:
            self.assertTrue(is_number(field.strip()) or field == 'NaN', "Expected numeric fields from nodetool gcstats. However, field.strip() is: {}".format(field.strip()))
return fields[6]
@known_failure(failure_source='cassandra',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11937',
flaky=True,
notes='OOM on trunk')
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11234',
flaky=False,
notes='windows')
def cleanup_test(self):
"""
@jira_ticket CASSANDRA-8670
"""
cluster = self.cluster
# Commit log segment size needs to increase for the database to be willing to accept columns that large
# internode compression is disabled because the regression being tested occurs in NIO buffer pooling without compression
cluster.set_configuration_options({'commitlog_segment_size_in_mb': 128, 'internode_compression': 'none'})
# Have Netty allocate memory on heap so it is clear if memory used for large columns is related to intracluster messaging
cluster.populate(2).start(jvm_args=[" -Dcassandra.netty_use_heap_allocator=true "])
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
debug("Before stress {0}".format(self.directbytes(node1)))
debug("Running stress")
# Run the full stack to see how much memory is utilized for "small" columns
self.stress_with_col_size(cluster, node1, 1)
beforeStress = self.directbytes(node1)
debug("Ran stress once {0}".format(beforeStress))
# Now run the full stack to see how much memory is utilized for "large" columns
LARGE_COLUMN_SIZE = 1024 * 1024 * 63
self.stress_with_col_size(cluster, node1, LARGE_COLUMN_SIZE)
output, err, _ = node1.nodetool("gcstats")
afterStress = self.directbytes(node1)
debug("After stress {0}".format(afterStress))
# Any growth in memory usage should not be proportional column size. Really almost no memory should be used
# since Netty was instructed to use a heap allocator
diff = int(afterStress) - int(beforeStress)
self.assertLess(diff, LARGE_COLUMN_SIZE)
| {
"content_hash": "7521402f78240132aeb49b09587d5a3b",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 189,
"avg_line_length": 50.189189189189186,
"alnum_prop": 0.6276252019386107,
"repo_name": "thobbs/cassandra-dtest",
"id": "0b228d2167ef30c161af71d84b039454f1523edc",
"size": "3714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "largecolumn_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2222502"
},
{
"name": "Shell",
"bytes": "1994"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
admin.autodiscover()
urlpatterns = i18n_patterns('',
url(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', {
'packages': ('pyconde.core',)
}),
url(r'^admin/', include(admin.site.urls)),
url(r'^tickets/', include('pyconde.attendees.urls')),
url(r'^accounts/', include('pyconde.accounts.urls')),
url(r'^accounts/', include('userprofiles.urls')),
url(r'^reviews/', include('pyconde.reviews.urls')),
url(r'^schedule/', include('pyconde.schedule.urls')),
url(r'^proposals/', include('pyconde.proposals.urls')),
url(r'^search/', include('pyconde.search.urls')),
url(r'^sponsorship/', include('pyconde.sponsorship.urls')),
url(r'^checkin/', include('pyconde.checkin.urls')),
url(r'^', include('cms.urls')),
)
urlpatterns += patterns('',
url(r'', include('social_auth.urls')),
)
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += patterns('',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^site_media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
| {
"content_hash": "c09a6972f75305ae221697f74bfc7ef6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 67,
"avg_line_length": 31.72093023255814,
"alnum_prop": 0.6414956011730205,
"repo_name": "EuroPython/djep",
"id": "c8a93dad20a96d830ae7333b28aaf5828e800d1a",
"size": "1388",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyconde/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "246835"
},
{
"name": "JavaScript",
"bytes": "112740"
},
{
"name": "Puppet",
"bytes": "2679"
},
{
"name": "Python",
"bytes": "1927106"
},
{
"name": "Ruby",
"bytes": "181"
},
{
"name": "Shell",
"bytes": "6515"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from itertools import combinations
import numpy as np
from scipy.spatial.distance import pdist
def sq_to_dist(i, j, n):
"""Convert coordinate of square distance matrix to condensed matrix index.
The condensed version of a squareform, pairwise distance matrix is
a linearisation of the upper triangular, non-diagonal coordinates
of the squareform distance matrix. This function returns the [i, j]-th
coordinate of the condensed array.
eg. given a squareform matrix,
array([[ 0. , 10. , 22.36067977],
[ 10. , 0. , 14.14213562],
[ 22.36067977, 14.14213562, 0. ]])
The condensed version of this matrix is:
array([ 10. , 22.36067977, 14.14213562])
Parameters
----------
i : int
i-th coordinate.
j : int
j-th coordinate.
n : int
Dimension n of n*n distance matrix.
Returns
-------
index : int
Position of pairwise distance [i, j] in
condensed distance matrix.
Reference
---------
In the scipy.spatial.squareform documentation, it is shown that the
index in the condensed array is given by
{n choose 2} - {(n - i) choose 2} + (j - i - 1).
Some simple arithmetic shows that this can be expanded to the formula below.
The documentation can be found in the following link:
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.squareform.html
Examples
--------
>>> sq_to_dist(0, 1, 4)
0
>>> sq_to_dist(0, 3, 4)
2
>>> sq_to_dist(1, 2, 4)
3
"""
if i > j:
i, j = j, i
index = i * n + j - i * (i + 1) / 2 - i - 1
return int(index)
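# Added verification sketch (not part of the original module): the mapping
# above can be cross-checked against scipy's squareform; the sample points are
# placeholders.
def _check_sq_to_dist():
    """Hypothetical helper: compare sq_to_dist with scipy's condensed layout."""
    from scipy.spatial.distance import squareform
    pts = np.array([[0., 0.], [1., 0.], [0., 2.], [3., 4.]])
    condensed = pdist(pts)
    square = squareform(condensed)
    n = square.shape[0]
    for i in range(n):
        for j in range(i + 1, n):
            assert condensed[sq_to_dist(i, j, n)] == square[i, j]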
def gene_distance_score(X, collection, metric='euclidean'):
"""Find intra/inter gene distance scores between samples.
Parameters
----------
X : Data frame, shape (n_samples, n_features)
Feature data frame.
collection : list of dict
A key-value-like store mapping well-plate indices (in ``'_id'``) to
various other attributes of that sample.
metric : string, optional
Which distance measure to use when calculating distances.
Must be one of the options allowable in
scipy.spatial.distance.pdist. Default is euclidean distance.
Returns
-------
all_intragene_data : array
        A 1D array with intra-gene distances (i.e. distances
between samples with the same gene knocked down).
all_intergene_data : array
        A 1D array with inter-gene distances (i.e. distances
between samples with different gene knocked down).
"""
gene_dict = defaultdict(list)
for doc in collection:
gene_dict[doc['gene_name']].append(doc['_id'])
nsamples = X.shape[0]
npairs = int(nsamples * (nsamples - 1) / 2)
all_intragene_index = []
for key in gene_dict:
if len(gene_dict[key]) > 1:
indices = (X.index.get_loc(coord) for coord in gene_dict[key]
if coord in X.index)
for i, j in combinations(indices, 2):
all_intragene_index.append(sq_to_dist(i, j, X.shape[0]))
all_intragene_index.sort()
all_intergene_index = np.setdiff1d(np.arange(npairs), all_intragene_index,
assume_unique=True)
distance = pdist(X, metric)
all_intragene_data = distance[all_intragene_index]
all_intergene_data = distance[all_intergene_index]
return all_intragene_data, all_intergene_data
def _partition_range(values1, values2, n):
"""Build a partition of bins over the entire range of values1 and values2.
Parameters
----------
values1, values2 : arrays
        arrays whose combined value range is partitioned
n : int
number of bins
Returns
-------
partition : array
        A 1D array of bin edges, of length n
Examples
--------
>>> d1 = np.array([3, 3, 4, 5, 6])
>>> d2 = np.array([5, 5, 5, 6, 7])
>>> _partition_range(d1, d2, 5)
array([3., 4., 5., 6., 7.])
"""
eps = 1e-30
d_max = max(np.max(values1), np.max(values2)) + eps
d_min = min(np.min(values1), np.min(values2))
    partition = np.linspace(d_min, d_max, n)
return partition
def _empirical_distribution(values, bins):
"""Return an EDF of an input array over a given array of bin edges
Note: returns a PDF, not a CDF
Parameters
----------
values : array of float
Values of distribution to be modelled
bins : array of float
Array of bin right edge values
Returns
-------
edf : array
A probability distribution over the range of bins
"""
ind = np.digitize(values, bins)
    # Note: np.digitize bin indices start from 1
    # np.bincount returns the number of data points that fall in each bin
    edf = np.bincount(ind, minlength=len(bins) + 1)
#normalize
edf = edf / np.sum(edf)
return edf
def bhattacharyya_distance(values0, values1, n):
"""Return the Bhattacharyya coefficient of 2 input arrays
BC of 2 distributions, f(x) and g(x) is given by [1]_:
    $\sum_{i=1}^n \sqrt{f(x_i) g(x_i)}$
Parameters
----------
values0, values1 : arrays
Return BC of these 2 arrays
n : int
number of bins to partition values0 and values1 over
Returns
-------
bc : real
Bhattacharyya coefficient of values0 and values1
References
----------
..[1] Bhattacharyya, A. (1943). "On a measure of divergence between two
statistical populations defined by their probability distributions"
Bulletin of the Calcutta Mathematical Society
Examples
--------
>>> d1 = np.array([3, 3, 4, 5, 6])
>>> d2 = np.array([5, 5, 5, 6, 7])
>>> d = bhattacharyya_distance(d1, d2, 5)
>>> abs(d - 0.546) < 1e-3
True
See Also
--------
_partition_range : function
_empirical_distribution : function
"""
bins = _partition_range(values0, values1, n)
d0 = _empirical_distribution(values0, bins)
d1 = _empirical_distribution(values1, bins)
bc = np.sum(np.sqrt(d0*d1))
return bc
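# Added usage sketch (not part of the original module): a typical end-to-end
# call chains gene_distance_score with bhattacharyya_distance to score how
# separable intra- and inter-gene distances are. `features` and `docs` are
# placeholders for a real feature frame and its sample metadata.
def _example_separation_score(features, docs, nbins=50):
    """Hypothetical helper: BC between intra- and inter-gene distance sets."""
    intra, inter = gene_distance_score(features, docs)
    return bhattacharyya_distance(intra, inter, nbins)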
| {
"content_hash": "4a8d885f99ac35bc4953ac4369315f87",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 101,
"avg_line_length": 28.254545454545454,
"alnum_prop": 0.601994851994852,
"repo_name": "jni/microscopium",
"id": "4b3d559b281eb4306f3c64706aacebb36e2c9f9b",
"size": "6216",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "microscopium/metrics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "828"
},
{
"name": "Python",
"bytes": "141971"
}
],
"symlink_target": ""
} |
import json
import datetime
from flask import request
from flask.ext.cors import cross_origin
from alerta.app import app, db
from alerta.alert import Alert
from alerta.app.utils import jsonify, jsonp, process_alert
from alerta.app.metrics import Timer
from alerta.plugins import RejectException
LOG = app.logger
webhook_timer = Timer('alerts', 'webhook', 'Web hook alerts', 'Total time to process number of web hook alerts')
duplicate_timer = Timer('alerts', 'duplicate', 'Duplicate alerts', 'Total time to process number of duplicate alerts')
correlate_timer = Timer('alerts', 'correlate', 'Correlated alerts', 'Total time to process number of correlated alerts')
create_timer = Timer('alerts', 'create', 'Newly created alerts', 'Total time to process number of new alerts')
def cw_state_to_severity(state):
if state == 'ALARM':
return 'major'
elif state == 'INSUFFICIENT_DATA':
return 'warning'
elif state == 'OK':
return 'normal'
else:
return 'unknown'
def parse_notification(notification):
notification = json.loads(notification)
if notification['Type'] == 'SubscriptionConfirmation':
return Alert(
resource=notification['TopicArn'],
event=notification['Type'],
environment='Production',
severity='informational',
service=['Unknown'],
group='AWS/CloudWatch',
text='%s <a href="%s" target="_blank">SubscribeURL</a>' % (notification['Message'], notification['SubscribeURL']),
origin=notification['TopicArn'],
event_type='cloudwatchAlarm',
create_time=datetime.datetime.strptime(notification['Timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ'),
raw_data=notification,
)
elif notification['Type'] == 'Notification':
alarm = json.loads(notification['Message'])
return Alert(
resource='%s:%s' % (alarm['Trigger']['Dimensions'][0]['name'], alarm['Trigger']['Dimensions'][0]['value']),
event=alarm['Trigger']['MetricName'],
environment='Production',
severity=cw_state_to_severity(alarm['NewStateValue']),
service=[alarm['AWSAccountId']],
group=alarm['Trigger']['Namespace'],
value=alarm['NewStateValue'],
text=alarm['AlarmDescription'],
tags=[alarm['Region']],
attributes={
'incidentKey': alarm['AlarmName'],
'thresholdInfo': alarm['Trigger']
},
origin=notification['TopicArn'],
event_type='cloudwatchAlarm',
create_time=datetime.datetime.strptime(notification['Timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ'),
raw_data=alarm
)
@app.route('/webhooks/cloudwatch', methods=['OPTIONS', 'POST'])
@cross_origin()
@jsonp
def cloudwatch():
hook_started = webhook_timer.start_timer()
try:
incomingAlert = parse_notification(request.data)
    except ValueError as e:
webhook_timer.stop_timer(hook_started)
return jsonify(status="error", message=str(e)), 400
try:
alert = process_alert(incomingAlert)
except RejectException as e:
webhook_timer.stop_timer(hook_started)
return jsonify(status="error", message=str(e)), 403
except Exception as e:
webhook_timer.stop_timer(hook_started)
return jsonify(status="error", message=str(e)), 500
webhook_timer.stop_timer(hook_started)
if alert:
body = alert.get_body()
body['href'] = "%s/%s" % (request.base_url, alert.id)
return jsonify(status="ok", id=alert.id, alert=body), 201, {'Location': '%s/%s' % (request.base_url, alert.id)}
else:
return jsonify(status="error", message="insert or update of cloudwatch alarm failed"), 500
def parse_pingdom(check):
check = json.loads(check)
if check['action'] == 'assign':
return Alert(
resource=check['host'],
event=check['description'],
correlate=['up', 'down'],
environment='Production',
severity='critical',
service=[check['checkname']],
group='Network',
text='%s is %s.' % (check['checkname'], check['description']),
attributes={'incidentKey': check['incidentid']},
origin='Pingdom',
event_type='availabilityAlert',
raw_data=check,
)
elif check['action'] == 'notify_of_close':
return Alert(
resource=check['host'],
event=check['description'],
correlate=['up', 'down'],
environment='Production',
severity='normal',
service=[check['checkname']],
group='Network',
text='%s is %s.' % (check['checkname'], check['description']),
attributes={'incidentKey': check['incidentid']},
origin='Pingdom',
event_type='availabilityAlert',
raw_data=check,
)
else:
return Alert(
resource=check['host'],
event=check['description'],
correlate=['up', 'down', check['description']],
environment='Production',
severity='indeterminate',
service=[check['checkname']],
group='Network',
text='%s is %s.' % (check['checkname'], check['description']),
attributes={'incidentKey': check['incidentid']},
origin='Pingdom',
event_type='availabilityAlert',
raw_data=check,
)
@app.route('/webhooks/pingdom', methods=['OPTIONS', 'GET'])
@cross_origin()
@jsonp
def pingdom():
hook_started = webhook_timer.start_timer()
try:
incomingAlert = parse_pingdom(request.args.get('message'))
    except ValueError as e:
webhook_timer.stop_timer(hook_started)
return jsonify(status="error", message=str(e)), 400
try:
alert = process_alert(incomingAlert)
except RejectException as e:
webhook_timer.stop_timer(hook_started)
return jsonify(status="error", message=str(e)), 403
except Exception as e:
webhook_timer.stop_timer(hook_started)
return jsonify(status="error", message=str(e)), 500
webhook_timer.stop_timer(hook_started)
if alert:
body = alert.get_body()
body['href'] = "%s/%s" % (request.base_url, alert.id)
return jsonify(status="ok", id=alert.id, alert=body), 201, {'Location': '%s/%s' % (request.base_url, alert.id)}
else:
return jsonify(status="error", message="insert or update of pingdom check failed"), 500
def parse_pagerduty(message):
incident_key = message['data']['incident']['incident_key']
incident_number = message['data']['incident']['incident_number']
html_url = message['data']['incident']['html_url']
incident_url = '<a href="%s">#%s</a>' % (html_url, incident_number)
try:
alert = db.get_alerts(query={'attributes.incidentKey': incident_key}, limit=1)[0]
except IndexError:
raise
from alerta.app import status_code
if message['type'] == 'incident.trigger':
status = status_code.OPEN
user = message['data']['incident']['assigned_to_user']['name']
text = 'Incident %s assigned to %s' % (incident_url, user)
elif message['type'] == 'incident.acknowledge':
status = status_code.ACK
user = message['data']['incident']['assigned_to_user']['name']
text = 'Incident %s acknowledged by %s' % (incident_url, user)
elif message['type'] == 'incident.unacknowledge':
status = status_code.OPEN
text = 'Incident %s unacknowledged due to timeout' % incident_url
elif message['type'] == 'incident.resolve':
status = status_code.CLOSED
if message['data']['incident']['resolved_by_user']:
user = message['data']['incident']['resolved_by_user']['name']
else:
user = 'n/a'
text = 'Incident %s resolved by %s' % (incident_url, user)
elif message['type'] == 'incident.assign':
status = status_code.ASSIGN
user = message['data']['incident']['assigned_to_user']['name']
text = 'Incident %s manually assigned to %s' % (incident_url, user)
elif message['type'] == 'incident.escalate':
status = status_code.OPEN
user = message['data']['incident']['assigned_to_user']['name']
text = 'Incident %s escalated to %s' % (incident_url, user)
elif message['type'] == 'incident.delegate':
status = status_code.OPEN
user = message['data']['incident']['assigned_to_user']['name']
text = 'Incident %s reassigned due to escalation to %s' % (incident_url, user)
else:
status = status_code.UNKNOWN
text = message['type']
return alert.id, status, text
@app.route('/webhooks/pagerduty', methods=['OPTIONS', 'POST'])
@cross_origin()
def pagerduty():
hook_started = webhook_timer.start_timer()
data = request.json
if data and 'messages' in data:
for message in data['messages']:
try:
id, status, text = parse_pagerduty(message)
            except IndexError as e:
webhook_timer.stop_timer(hook_started)
return jsonify(status="error", message=str(e)), 400
try:
alert = db.set_status(id=id, status=status, text=text)
except Exception as e:
webhook_timer.stop_timer(hook_started)
return jsonify(status="error", message=str(e)), 500
else:
webhook_timer.stop_timer(hook_started)
return jsonify(status="error", message="no messages in PagerDuty data payload"), 400
webhook_timer.stop_timer(hook_started)
if alert:
return jsonify(status="ok"), 200
else:
return jsonify(status="error", message="update PagerDuty incident status failed"), 500
| {
"content_hash": "fa5889cfccd1358b97051025100081e1",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 126,
"avg_line_length": 36.803703703703704,
"alnum_prop": 0.5985709972828822,
"repo_name": "mrkeng/alerta",
"id": "db9f950ef8afce78304fc8e3029e566243cec681",
"size": "9937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alerta/app/webhooks/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4651"
},
{
"name": "JavaScript",
"bytes": "2394"
},
{
"name": "Makefile",
"bytes": "587"
},
{
"name": "Python",
"bytes": "149682"
},
{
"name": "Shell",
"bytes": "4887"
}
],
"symlink_target": ""
} |
import os
import argparse
import logging
import subprocess
def checksum(md5sum, file):
setup_logging()
if not os.path.exists(file):
print "File not found! %s" % file
logging.info("md5sum check FAIL: %file - file not found")
exit(1)
hash = get_md5sum(file)
if not hash == md5sum:
print "md5sum check failed: %s actual: %s reported: %s" % (file, hash, md5sum)
logging.info("md5sum check FAILED: %s actual: %s reported: %s" % (file, hash, md5sum))
        exit(1)
logging.info("md5sum check PASS: %s actual: %s reported: %s" % (file, hash, md5sum))
def get_md5sum(file):
cmd = ['md5sum', file]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if not p.returncode == 0:
print "md5sum hash of %s failed" % file
hash = out.split(" ")[0]
return hash
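# Added sketch (not part of the original script): the same hash can be
# computed without shelling out, using only the standard library; shown for
# illustration, the script above keeps using the md5sum binary.
def get_md5sum_hashlib(path, chunk_size=1 << 20):
    """Hypothetical helper: md5 hex digest of a file via hashlib."""
    import hashlib
    digest = hashlib.md5()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()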
def setup_logging():
log_name = "checksum.log"
logging.basicConfig(filename=log_name,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', level=logging.INFO)
def parse_command_line():
parser = argparse.ArgumentParser(
        description = 'Check a file against a reported md5sum hash.')
parser.add_argument("--md5sum", default=None,
help="md5sum hash to check against")
parser.add_argument("--file", default=None,
help="File to check.")
options = parser.parse_args()
if options.md5sum is None or options.file is None:
print "Exiting, must specify both options. See checksum.py --help for details."
exit(0)
return options
if __name__ == "__main__":
options = parse_command_line()
checksum(options.md5sum, options.file) | {
"content_hash": "90d79af074133a217256ffd2a8d365ad",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 109,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.6159822419533851,
"repo_name": "StanfordBioinformatics/qualia",
"id": "2ca2a93076b8da9d7ad5934dea5a810f8620ff3b",
"size": "1905",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/checksum.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9456"
},
{
"name": "R",
"bytes": "10167"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from timeit import Timer
from pyprimesieve import primes, primes_sum
def sumofprimes(n): # lambda expression is slower
return sum(primes(n))
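# Added sketch (not part of the original benchmark): a quick agreement check
# between the two code paths before timing them; the bound is arbitrary.
def check_agreement(n=10**6):
    """Hypothetical helper: ensure primes_sum matches summing primes()."""
    assert primes_sum(n) == sumofprimes(n)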
if __name__ == "__main__":
for n in range(5, 9):
print('10**{}'.format(n))
for fn in ['primes_sum', 'sumofprimes']:
timer = Timer(stmt='{}({})'.format(fn, 10**n), setup='from __main__ import {}'.format(fn))
timer = min(timer.repeat(repeat=12, number=1)) * 10**3
print(fn, timer)
print('')
| {
"content_hash": "8097d79cfe437c1d23e2be339ffbfdfa",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 102,
"avg_line_length": 33.75,
"alnum_prop": 0.5777777777777777,
"repo_name": "anirudhjayaraman/pyprimesieve",
"id": "255543ebd98ebe9ea6a60258eed4a222476ef432",
"size": "563",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bench/bench_sum_primes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "122732"
},
{
"name": "Python",
"bytes": "26510"
}
],
"symlink_target": ""
} |
# Copyright (c) 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestMcrouterRoutingPrefixAscii(McrouterTestCase):
config = './mcrouter/test/routing_prefix_test_ascii.json'
extra_args = []
def setUp(self):
# The order here must corresponds to the order of hosts in the .json
self.allhosts = []
for i in range(0, 4):
self.allhosts.append(self.add_server(Memcached()))
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_routing_prefix(self):
mcr = self.get_mcrouter()
nclusters = len(self.allhosts)
# first try setting a key to the local cluster
mcr.set("testkeylocal", "testvalue")
self.assertEqual(self.allhosts[0].get("testkeylocal"), "testvalue")
for i in range(1, nclusters):
self.assertIsNone(self.allhosts[i].get("testkeylocal"))
mcr.set("/*/*/testkey-routing", "testvalue")
# /*/*/ is all-fastest, and some requests might complete asynchronously.
# As a workaround, just wait
time.sleep(1)
local = self.allhosts[0].get("testkey-routing", True)
self.assertEqual(local["value"], "testvalue")
# make sure the key got set as "/*/*/key"
for i in range(1, nclusters):
local = self.allhosts[i].get("/*/*/testkey-routing", True)
self.assertEqual(local["value"], "testvalue")
class TestMcrouterRoutingPrefixUmbrella(TestMcrouterRoutingPrefixAscii):
config = './mcrouter/test/routing_prefix_test_umbrella.json'
class TestMcrouterRoutingPrefixOldNaming(TestMcrouterRoutingPrefixAscii):
config = './mcrouter/test/routing_prefix_test_old_naming.json'
class TestCustomRoutingPrefixes(McrouterTestCase):
config = './mcrouter/test/routing_prefix_test_custom.json'
extra_args = []
def setUp(self):
self.aa = self.add_server(Memcached())
self.ab = self.add_server(Memcached())
self.ba = self.add_server(Memcached())
self.bb = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_custom_routing_prefix(self):
mcr = self.get_mcrouter()
key = "/*/a/key"
value = "value"
mcr.set(key, value)
time.sleep(1)
self.assertEqual(self.aa.get('key'), value)
self.assertEqual(self.ba.get('key'), value)
key = "/b*/*/key"
value = "value2"
mcr.set(key, value)
time.sleep(1)
self.assertEqual(self.ba.get('key'), value)
self.assertEqual(self.bb.get('key'), value)
key = "/b/*b*/key"
value = "value3"
mcr.set(key, value)
self.assertEqual(self.bb.get('key'), value)
| {
"content_hash": "febdd01e43bf7c62bf90f2e7f8c526b7",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 80,
"avg_line_length": 35.07368421052632,
"alnum_prop": 0.6449579831932774,
"repo_name": "tempbottle/mcrouter",
"id": "08ca3154f31ffa90f42e6ad3f7c08571a09151d0",
"size": "3332",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "mcrouter/test/test_routing_prefixes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "370515"
},
{
"name": "C++",
"bytes": "1393986"
},
{
"name": "Python",
"bytes": "157324"
},
{
"name": "Ragel in Ruby Host",
"bytes": "14897"
},
{
"name": "Shell",
"bytes": "8553"
}
],
"symlink_target": ""
} |
"""significant_figures_calculator URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
#from significant_figures_calculator.views import post_demo
from significant_figures_calculator.views import *
urlpatterns = [
url(r'^admin/', admin.site.urls),
#url(r'^post/$', post_demo),
url(r'^add/$', add),
#url(r'^sub/$', sub),
#url(r'^mul/$', mul),
url(r'^acceleration/$', acceleration),
]
| {
"content_hash": "6db063a4f525b4013408c2b699f8ff53",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 37.5,
"alnum_prop": 0.6885714285714286,
"repo_name": "Rezztech/significant-figures-calculator",
"id": "0b4286ad818ec1f242c0ea53e28f65cdbea8275e",
"size": "1050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "significant_figures_calculator/significant_figures_calculator/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "777"
},
{
"name": "HTML",
"bytes": "19606"
},
{
"name": "JavaScript",
"bytes": "1009"
},
{
"name": "Python",
"bytes": "13424"
}
],
"symlink_target": ""
} |
"""
sentry.web.urls
~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from django.conf.urls.defaults import *
from django.views.defaults import page_not_found
from sentry.conf.settings import KEY
from sentry.web import views, feeds
handler404 = lambda x: page_not_found(x, template_name='sentry/404.html')
def handler500(request):
"""
500 error handler.
Templates: `500.html`
Context: None
"""
from django.template import Context, loader
from django.http import HttpResponseServerError
context = {'request': request}
t = loader.get_template('sentry/500.html')
return HttpResponseServerError(t.render(Context(context)))
urlpatterns = patterns('',
url(r'^_static/(?P<path>.*)$', views.static_media, name='sentry-media'),
# Feeds
url(r'^feeds/messages.xml$', feeds.MessageFeed(), name='sentry-feed-messages'),
url(r'^feeds/summaries.xml$', feeds.SummaryFeed(), name='sentry-feed-summaries'),
# JS and API
url(r'^jsapi/$', views.ajax_handler, name='sentry-ajax'),
url(r'^store/$', views.store, name='sentry-store'),
# Normal views
url(r'^login$', views.login, name='sentry-login'),
url(r'^logout$', views.logout, name='sentry-logout'),
url(r'^group/(\d+)$', views.group, name='sentry-group'),
url(r'^group/(\d+)/messages$', views.group_message_list, name='sentry-group-messages'),
url(r'^group/(\d+)/messages/(\d+)$', views.group_message_details, name='sentry-group-message'),
url(r'^group/(\d+)/actions/([\w_-]+)', views.group_plugin_action, name='sentry-group-plugin-action'),
url(r'^search$', views.search, name='sentry-search'),
url(r'^$', views.index, name='sentry'),
)
| {
"content_hash": "5bcdf295bbcf38eaf6b07c9a659f5e39",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 105,
"avg_line_length": 30.322033898305083,
"alnum_prop": 0.660145332588038,
"repo_name": "joshjo/django-sentry",
"id": "e00668fef5d6748f7dcd0a11511e5daab545d1cf",
"size": "1789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentry/web/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21335"
},
{
"name": "JavaScript",
"bytes": "10544"
},
{
"name": "Python",
"bytes": "223377"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from apiclient import discovery
from httplib2 import Http
from oauth2client import client, file, tools
SCOPES = "https://www.googleapis.com/auth/forms.body"
DISCOVERY_DOC = "https://forms.googleapis.com/$discovery/rest?version=v1"
store = file.Storage('token.json')
creds = None
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('client_secrets.json', SCOPES)
creds = tools.run_flow(flow, store)
form_service = discovery.build('forms', 'v1', http=creds.authorize(
Http()), discoveryServiceUrl=DISCOVERY_DOC, static_discovery=False)
form = {
"info": {
"title": "Update item example for Forms API",
}
}
# Creates the initial Form
createResult = form_service.forms().create(body=form).execute()
# Request body to add a video item to a Form
update = {
"requests": [{
"createItem": {
"item": {
"title": "Homework video",
"description": "Quizzes in Google Forms",
"videoItem": {
"video": {
"youtubeUri": "https://www.youtube.com/watch?v=Lt5HqPvM-eI"
}
}
},
"location": {
"index": 0
}
}
}
]
}
# Add the video to the form
question_setting = form_service.forms().batchUpdate(
formId=createResult["formId"], body=update).execute()
# Print the result to see it now has a video
result = form_service.forms().get(formId=createResult["formId"]).execute()
print(result)
# [END forms_add_item]
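# Added illustrative sketch (not part of the original sample): the same
# batchUpdate pattern can append further items. The body below would add a
# short-answer question after the video; the field names follow the Forms API
# v1 item schema but should be verified against the API reference before use.
new_question = {
    "requests": [{
        "createItem": {
            "item": {
                "title": "What did you learn from the video?",
                "questionItem": {
                    "question": {
                        "required": False,
                        "textQuestion": {"paragraph": False}
                    }
                }
            },
            "location": {"index": 1}
        }
    }]
}
# Uncomment to apply:
# form_service.forms().batchUpdate(
#     formId=createResult["formId"], body=new_question).execute()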
| {
"content_hash": "8643e1d6a18a1de8b928456e2cc11e0d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 83,
"avg_line_length": 28.5,
"alnum_prop": 0.6058897243107769,
"repo_name": "googleworkspace/python-samples",
"id": "964404b99da59f91a4d18b9900398761fd8e2c02",
"size": "2197",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "forms/snippets/add_item.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "401984"
}
],
"symlink_target": ""
} |
import datetime
import os
import unittest
from airflow import settings
from airflow.configuration import conf
from airflow.models import DAG, TaskInstance as TI, XCom, clear_task_instances
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
from tests.models import DEFAULT_DATE
class TestClearTasks(unittest.TestCase):
def tearDown(self):
with create_session() as session:
session.query(TI).delete()
def test_clear_task_instances(self):
dag = DAG('test_clear_task_instances', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='0', owner='test', dag=dag)
task1 = DummyOperator(task_id='1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
with create_session() as session:
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session, dag=dag)
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 3)
def test_clear_task_instances_without_task(self):
dag = DAG('test_clear_task_instances_without_task', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='task0', owner='test', dag=dag)
task1 = DummyOperator(task_id='task1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
# Remove the task from dag.
dag.task_dict = {}
self.assertFalse(dag.has_task(task0.task_id))
self.assertFalse(dag.has_task(task1.task_id))
with create_session() as session:
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
# When dag is None, max_tries will be maximum of original max_tries or try_number.
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 2)
def test_clear_task_instances_without_dag(self):
dag = DAG('test_clear_task_instances_without_dag', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='task_0', owner='test', dag=dag)
task1 = DummyOperator(task_id='task_1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
with create_session() as session:
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
# When dag is None, max_tries will be maximum of original max_tries or try_number.
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 2)
def test_dag_clear(self):
dag = DAG('test_dag_clear', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='test_dag_clear_task_0', owner='test', dag=dag)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
# Next try to run will be try 1
self.assertEqual(ti0.try_number, 1)
ti0.run()
self.assertEqual(ti0.try_number, 2)
dag.clear()
ti0.refresh_from_db()
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.state, State.NONE)
self.assertEqual(ti0.max_tries, 1)
task1 = DummyOperator(task_id='test_dag_clear_task_1', owner='test',
dag=dag, retries=2)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
self.assertEqual(ti1.max_tries, 2)
ti1.try_number = 1
# Next try will be 2
ti1.run()
self.assertEqual(ti1.try_number, 3)
self.assertEqual(ti1.max_tries, 2)
dag.clear()
ti0.refresh_from_db()
ti1.refresh_from_db()
        # after clear dag, ti1 should show attempt 3 of 5
self.assertEqual(ti1.max_tries, 4)
self.assertEqual(ti1.try_number, 3)
        # after clear dag, ti0 should show attempt 2 of 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
def test_dags_clear(self):
# setup
session = settings.Session()
dags, tis = [], []
num_of_dags = 5
for i in range(num_of_dags):
dag = DAG('test_dag_clear_' + str(i), start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
ti = TI(task=DummyOperator(task_id='test_task_clear_' + str(i), owner='test',
dag=dag),
execution_date=DEFAULT_DATE)
dags.append(dag)
tis.append(ti)
# test clear all dags
for i in range(num_of_dags):
tis[i].run()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 2)
self.assertEqual(tis[i].max_tries, 0)
DAG.clear_dags(dags)
for i in range(num_of_dags):
tis[i].refresh_from_db()
self.assertEqual(tis[i].state, State.NONE)
self.assertEqual(tis[i].try_number, 2)
self.assertEqual(tis[i].max_tries, 1)
# test dry_run
for i in range(num_of_dags):
tis[i].run()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
DAG.clear_dags(dags, dry_run=True)
for i in range(num_of_dags):
tis[i].refresh_from_db()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
# test only_failed
from random import randint
failed_dag_idx = randint(0, len(tis) - 1)
tis[failed_dag_idx].state = State.FAILED
session.merge(tis[failed_dag_idx])
session.commit()
DAG.clear_dags(dags, only_failed=True)
for i in range(num_of_dags):
tis[i].refresh_from_db()
if i != failed_dag_idx:
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
else:
self.assertEqual(tis[i].state, State.NONE)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 2)
def test_operator_clear(self):
dag = DAG('test_operator_clear', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
op1 = DummyOperator(task_id='bash_op', owner='test', dag=dag)
op2 = DummyOperator(task_id='dummy_op', owner='test', dag=dag, retries=1)
op2.set_upstream(op1)
ti1 = TI(task=op1, execution_date=DEFAULT_DATE)
ti2 = TI(task=op2, execution_date=DEFAULT_DATE)
ti2.run()
# Dependency not met
self.assertEqual(ti2.try_number, 1)
self.assertEqual(ti2.max_tries, 1)
op2.clear(upstream=True)
ti1.run()
ti2.run()
self.assertEqual(ti1.try_number, 2)
# max_tries is 0 because there is no task instance in db for ti1
# so clear won't change the max_tries.
self.assertEqual(ti1.max_tries, 0)
self.assertEqual(ti2.try_number, 2)
# try_number (0) + retries(1)
self.assertEqual(ti2.max_tries, 1)
def test_xcom_disable_pickle_type(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test1"
dag_id = "test_dag1"
task_id = "test_task1"
conf.set("core", "enable_xcom_pickling", "False")
XCom.set(key=key,
value=json_obj,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date)
ret_value = XCom.get_many(key=key,
dag_ids=dag_id,
task_ids=task_id,
execution_date=execution_date).first().value
self.assertEqual(ret_value, json_obj)
session = settings.Session()
ret_value = session.query(XCom).filter(XCom.key == key, XCom.dag_id == dag_id,
XCom.task_id == task_id,
XCom.execution_date == execution_date
).first().value
self.assertEqual(ret_value, json_obj)
def test_xcom_enable_pickle_type(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test2"
dag_id = "test_dag2"
task_id = "test_task2"
conf.set("core", "enable_xcom_pickling", "True")
XCom.set(key=key,
value=json_obj,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date)
ret_value = XCom.get_many(key=key,
dag_ids=dag_id,
task_ids=task_id,
execution_date=execution_date).first().value
self.assertEqual(ret_value, json_obj)
session = settings.Session()
ret_value = session.query(XCom).filter(XCom.key == key, XCom.dag_id == dag_id,
XCom.task_id == task_id,
XCom.execution_date == execution_date
).first().value
self.assertEqual(ret_value, json_obj)
def test_xcom_disable_pickle_type_fail_on_non_json(self):
class PickleRce:
def __reduce__(self):
return os.system, ("ls -alt",)
conf.set("core", "xcom_enable_pickling", "False")
self.assertRaises(TypeError, XCom.set,
key="xcom_test3",
value=PickleRce(),
dag_id="test_dag3",
task_id="test_task3",
execution_date=timezone.utcnow())
def test_xcom_get_many(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test4"
dag_id1 = "test_dag4"
task_id1 = "test_task4"
dag_id2 = "test_dag5"
task_id2 = "test_task5"
conf.set("core", "xcom_enable_pickling", "True")
XCom.set(key=key,
value=json_obj,
dag_id=dag_id1,
task_id=task_id1,
execution_date=execution_date)
XCom.set(key=key,
value=json_obj,
dag_id=dag_id2,
task_id=task_id2,
execution_date=execution_date)
results = XCom.get_many(key=key, execution_date=execution_date)
for result in results:
self.assertEqual(result.value, json_obj)
| {
"content_hash": "ef564111fb49f6c13a781d44a7606a4d",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 90,
"avg_line_length": 37.63354037267081,
"alnum_prop": 0.5548770424162403,
"repo_name": "wileeam/airflow",
"id": "b3ec17bddc8008e980184393854998ecbe314615",
"size": "12906",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/models/test_cleartasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17179"
},
{
"name": "HTML",
"bytes": "148281"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "9763694"
},
{
"name": "Shell",
"bytes": "221331"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
''':mod:`yakonfig` declarations for rejester.
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2014 Diffeo, Inc.
'''
from __future__ import absolute_import
import os
import yakonfig
config_name = 'rejester'
default_config = {
'app_name': 'rejester',
'worker': 'fork_worker',
'enough_memory': False,
'default_lifetime': 1500,
}
def add_arguments(parser):
parser.add_argument('--app-name',
help='name of app for namespace prefixing')
parser.add_argument('--namespace',
help='namespace for prefixing table names')
parser.add_argument('--registry-address', metavar='HOST:PORT',
action='append', dest='registry_addresses',
help='location of the Redis registry server')
runtime_keys = {
'app_name': 'app_name',
'registry_addresses': 'registry_addresses',
'namespace': 'namespace',
}
def discover_config(config, name):
if 'registry_addresses' not in config:
addr = os.environ.get('REDIS_PORT_6379_TCP_ADDR', None)
port = os.environ.get('REDIS_PORT_6379_TCP_PORT', None)
if addr and port:
config['registry_addresses'] = [addr + ':' + port]
def check_config(config, name):
for k in ['registry_addresses', 'app_name', 'namespace']:
if k not in config or config[k] is None:
raise yakonfig.ConfigurationError(
'{0} requires configuration for {1}'
.format(name, k))
if len(config['registry_addresses']) == 0:
raise yakonfig.ConfigurationError(
'{0} requires at least one registry_addresses'
.format(name))
for addr in config['registry_addresses']:
if ':' not in addr:
raise yakonfig.ConfigurationError(
'{0} registry_addresses must be HOST:PORT, not {1!r}'
.format(name, addr))
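# Added illustrative sketch (not part of the original module): a minimal
# configuration dict that satisfies check_config() above; the Redis address
# and namespace are placeholders.
def _example_config():
    """Hypothetical helper: build and validate a minimal rejester config."""
    config = dict(default_config)
    config['namespace'] = 'example_namespace'
    config['registry_addresses'] = ['redis.example.com:6379']
    check_config(config, config_name)
    return config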
| {
"content_hash": "6cf0a7bdd9c544d678a4811d242929b9",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 69,
"avg_line_length": 35.127272727272725,
"alnum_prop": 0.6040372670807453,
"repo_name": "diffeo/rejester",
"id": "4e71b49e10b9ca053e22351deba3291ee8503412",
"size": "1932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rejester/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "552"
},
{
"name": "Python",
"bytes": "328724"
}
],
"symlink_target": ""
} |
from .curses_menu import CursesMenu
from .curses_menu import clear_terminal
from .selection_menu import SelectionMenu
from .multi_selection_menu import MultiSelect
from . import items
from .version import __version__
__all__ = ['CursesMenu', 'SelectionMenu', 'MultiSelect', 'items', 'clear_terminal']
| {
"content_hash": "0af47d80ce6990fb21ed87b870eb0527",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 83,
"avg_line_length": 37.75,
"alnum_prop": 0.7682119205298014,
"repo_name": "mholgatem/GPIOnext",
"id": "42d612073481820f56c8a014bbb3f171434bbb69",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cursesmenu/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65277"
},
{
"name": "Shell",
"bytes": "5243"
}
],
"symlink_target": ""
} |
"""
Views for managing database clusters.
"""
from collections import OrderedDict
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
import six
from horizon import exceptions
from horizon import forms as horizon_forms
from horizon import tables as horizon_tables
from horizon import tabs as horizon_tabs
from horizon.utils import memoized
from trove_dashboard import api
from trove_dashboard.content.database_clusters \
import cluster_manager
from trove_dashboard.content.database_clusters import forms
from trove_dashboard.content.database_clusters import tables
from trove_dashboard.content.database_clusters import tabs
LOG = logging.getLogger(__name__)
class IndexView(horizon_tables.DataTableView):
table_class = tables.ClustersTable
template_name = 'project/database_clusters/index.html'
def has_more_data(self, table):
return self._more
@memoized.memoized_method
def get_flavors(self):
try:
flavors = api.trove.flavor_list(self.request)
except Exception:
flavors = []
msg = _('Unable to retrieve database size information.')
exceptions.handle(self.request, msg)
return OrderedDict((six.text_type(flavor.id), flavor)
for flavor in flavors)
def _extra_data(self, cluster):
try:
cluster_flavor = cluster.instances[0]["flavor"]["id"]
flavors = self.get_flavors()
flavor = flavors.get(cluster_flavor)
if flavor is not None:
cluster.full_flavor = flavor
except Exception:
# ignore any errors and just return cluster unaltered
pass
return cluster
def get_data(self):
marker = self.request.GET.get(
tables.ClustersTable._meta.pagination_param)
# Gather our clusters
try:
clusters = api.trove.cluster_list(self.request, marker=marker)
self._more = clusters.next or False
except Exception:
self._more = False
clusters = []
msg = _('Unable to retrieve database clusters.')
exceptions.handle(self.request, msg)
map(self._extra_data, clusters)
return clusters
class LaunchClusterView(horizon_forms.ModalFormView):
form_class = forms.LaunchForm
form_id = "launch_form"
modal_header = _("Launch Cluster")
modal_id = "launch_modal"
template_name = 'project/database_clusters/launch.html'
submit_label = _("Launch")
submit_url = reverse_lazy('horizon:project:database_clusters:launch')
success_url = reverse_lazy('horizon:project:database_clusters:index')
class DetailView(horizon_tabs.TabbedTableView):
tab_group_class = tabs.ClusterDetailTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ cluster.name|default:cluster.id }}"
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context["url"] = reverse('horizon:project:database_clusters:index')
context["cluster"] = self.get_data()
return context
@memoized.memoized_method
def get_data(self):
try:
cluster_id = self.kwargs['cluster_id']
cluster = api.trove.cluster_get(self.request, cluster_id)
except Exception:
redirect = reverse('horizon:project:database_clusters:index')
msg = _('Unable to retrieve details '
'for database cluster: %s') % cluster_id
exceptions.handle(self.request, msg, redirect=redirect)
try:
cluster.full_flavor = api.trove.flavor_get(
self.request, cluster.instances[0]["flavor"]["id"])
except Exception:
LOG.error('Unable to retrieve flavor details'
' for database cluster: %s' % cluster_id)
cluster.num_instances = len(cluster.instances)
        # Todo(saurabhs) Set mgmt_url to display Mgmt Console URL on
# cluster details page
# for instance in cluster.instances:
# if instance['type'] == "master":
# cluster.mgmt_url = "https://%s:5450/webui" % instance['ip'][0]
return cluster
def get_tabs(self, request, *args, **kwargs):
cluster = self.get_data()
return self.tab_group_class(request, cluster=cluster, **kwargs)
class ClusterGrowView(horizon_tables.DataTableView):
table_class = tables.ClusterGrowInstancesTable
template_name = 'project/database_clusters/cluster_grow_details.html'
page_title = _("Grow Cluster: {{cluster_name}}")
def get_data(self):
manager = cluster_manager.get(self.kwargs['cluster_id'])
return manager.get_instances()
def get_context_data(self, **kwargs):
context = super(ClusterGrowView, self).get_context_data(**kwargs)
context['cluster_id'] = self.kwargs['cluster_id']
cluster = self.get_cluster(self.kwargs['cluster_id'])
context['cluster_name'] = cluster.name
return context
@memoized.memoized_method
def get_cluster(self, cluster_id):
try:
return api.trove.cluster_get(self.request, cluster_id)
except Exception:
redirect = reverse("horizon:project:database_clusters:index")
msg = _('Unable to retrieve cluster details.')
exceptions.handle(self.request, msg, redirect=redirect)
class ClusterAddInstancesView(horizon_forms.ModalFormView):
form_class = forms.ClusterAddInstanceForm
form_id = "cluster_add_instances_form"
modal_header = _("Add Instance")
modal_id = "cluster_add_instances_modal"
template_name = "project/database_clusters/add_instance.html"
submit_label = _("Add")
submit_url = "horizon:project:database_clusters:add_instance"
success_url = "horizon:project:database_clusters:cluster_grow_details"
cancel_url = "horizon:project:database_clusters:cluster_grow_details"
page_title = _("Add Instance")
def get_context_data(self, **kwargs):
context = (super(ClusterAddInstancesView, self)
.get_context_data(**kwargs))
context['cluster_id'] = self.kwargs['cluster_id']
args = (self.kwargs['cluster_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
initial = super(ClusterAddInstancesView, self).get_initial()
initial['cluster_id'] = self.kwargs['cluster_id']
return initial
def get_success_url(self):
return reverse(self.success_url, args=[self.kwargs['cluster_id']])
def get_cancel_url(self):
return reverse(self.cancel_url, args=[self.kwargs['cluster_id']])
class ClusterInstance(object):
def __init__(self, id, name, status):
self.id = id
self.name = name
self.status = status
class ClusterShrinkView(horizon_tables.DataTableView):
table_class = tables.ClusterShrinkInstancesTable
template_name = "project/database_clusters/cluster_shrink_details.html"
page_title = _("Shrink Cluster: {{cluster_name}}")
@memoized.memoized_method
def get_cluster(self, cluster_id):
try:
return api.trove.cluster_get(self.request, cluster_id)
except Exception:
redirect = reverse("horizon:project:database_clusters:index")
msg = _('Unable to retrieve cluster details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_data(self):
cluster = self.get_cluster(self.kwargs['cluster_id'])
instances = [ClusterInstance(i['id'], i['name'], i['status'])
for i in cluster.instances]
return instances
def get_context_data(self, **kwargs):
context = super(ClusterShrinkView, self).get_context_data(**kwargs)
context['cluster_id'] = self.kwargs['cluster_id']
cluster = self.get_cluster(self.kwargs['cluster_id'])
context['cluster_name'] = cluster.name
return context
class ResetPasswordView(horizon_forms.ModalFormView):
form_class = forms.ResetPasswordForm
template_name = 'project/database_clusters/reset_password.html'
success_url = reverse_lazy('horizon:project:database_clusters:index')
page_title = _("Reset Root Password")
@memoized.memoized_method
def get_object(self, *args, **kwargs):
cluster_id = self.kwargs['cluster_id']
try:
return api.trove.cluster_get(self.request, cluster_id)
except Exception:
msg = _('Unable to retrieve cluster details.')
redirect = reverse('horizon:project:database_clusters:index')
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(ResetPasswordView, self).get_context_data(**kwargs)
context['cluster_id'] = self.kwargs['cluster_id']
return context
def get_initial(self):
return {'cluster_id': self.kwargs['cluster_id']}
| {
"content_hash": "caabe11511e6b190b3426c322ffd67f7",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 78,
"avg_line_length": 37.2520325203252,
"alnum_prop": 0.6515713662156264,
"repo_name": "Tesora-Release/tesora-trove-dashboard",
"id": "43cf729320b2eff695e9a19bd743809c7fb6783b",
"size": "9842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove_dashboard/content/database_clusters/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4087"
},
{
"name": "HTML",
"bytes": "33250"
},
{
"name": "JavaScript",
"bytes": "1933"
},
{
"name": "Python",
"bytes": "432226"
},
{
"name": "Shell",
"bytes": "18300"
}
],
"symlink_target": ""
} |
from utils import calculate_bytecode, to_wei
scenario_description = (
"The DAO spent all its money and has to resort to retrieving money from "
"the extra balance account. This test checks that this is succesful."
)
def run(ctx):
ctx.assert_scenario_ran('spendall')
extra_balance_ether_to_get = 5
bytecode = calculate_bytecode(
'payOut',
('address', ctx.dao_address),
('uint256', to_wei(extra_balance_ether_to_get))
)
ctx.create_js_file(substitutions={
"dao_abi": ctx.dao_abi,
"dao_address": ctx.dao_address,
"proposal_deposit": ctx.args.proposal_deposit,
"debating_period": ctx.args.proposal_debate_seconds,
"transaction_bytecode": bytecode
})
ctx.execute(expected={
"dao_balance_diff_after_claim": extra_balance_ether_to_get
})
| {
"content_hash": "1b76e8f7b02449fc14dd1c94cd7d62ed",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 77,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.648936170212766,
"repo_name": "Spreadway/core",
"id": "38f02336352f10725b1f32ddedb7f2c124a1ae80",
"size": "846",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "DAO-develop/tests/scenarios/extrabalance/run.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2950"
},
{
"name": "HTML",
"bytes": "5341"
},
{
"name": "JavaScript",
"bytes": "695229"
},
{
"name": "Jupyter Notebook",
"bytes": "11712"
},
{
"name": "Python",
"bytes": "102340"
},
{
"name": "TeX",
"bytes": "340903"
}
],
"symlink_target": ""
} |
"""Import core names of TensorFlow.
Programs that want to build TensorFlow Ops and Graphs without having to import
the constructors and utilities individually can import this file:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
"""
import ctypes
import importlib
import sys
import traceback
# TODO(drpng): write up instructions for editing this file in a doc and point to
# the doc instead.
# If you want to edit this file to expose modules in public tensorflow API, you
# need to follow these steps:
# 1. Consult with tensorflow team and get approval for adding a new API to the
# public interface.
# 2. Document the module in the gen_docs_combined.py.
# 3. Import the module in the main tensorflow namespace by adding an import
# statement in this file.
# 4. Sanitize the entry point by making sure that your module does not expose
# transitively imported modules used for implementation, such as os, sys.
# go/tf-wildcard-import
# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top
import numpy as np
from tensorflow.python import pywrap_tensorflow
# Protocol buffers
from tensorflow.core.framework.graph_pb2 import *
from tensorflow.core.framework.node_def_pb2 import *
from tensorflow.core.framework.summary_pb2 import *
from tensorflow.core.framework.attr_value_pb2 import *
from tensorflow.core.protobuf.meta_graph_pb2 import TensorInfo
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.core.protobuf.config_pb2 import *
from tensorflow.core.protobuf.tensorflow_server_pb2 import *
from tensorflow.core.util.event_pb2 import *
# Framework
from tensorflow.python.framework.framework_lib import *
from tensorflow.python.framework.versions import *
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
# Session
from tensorflow.python.client.client_lib import *
# Ops
from tensorflow.python.ops.standard_ops import *
# Namespaces
from tensorflow.python.ops import initializers_ns as initializers
# pylint: enable=wildcard-import
# Bring in subpackages.
from tensorflow.python import data
from tensorflow.python import keras
from tensorflow.python.estimator import estimator_lib as estimator
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.layers import layers
from tensorflow.python.ops import bitwise_ops as bitwise
from tensorflow.python.ops import image_ops as image
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import spectral_ops as spectral
from tensorflow.python.ops.distributions import distributions
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.losses import losses
from tensorflow.python.profiler import profiler
from tensorflow.python.saved_model import saved_model
from tensorflow.python.summary import summary
from tensorflow.python.user_ops import user_ops
from tensorflow.python.util import compat
# Import the names from python/training.py as train.Name.
from tensorflow.python.training import training as train
# Sub-package for performing i/o directly instead of via ops in a graph.
from tensorflow.python.lib.io import python_io
# Make some application and test modules available.
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import sysconfig
from tensorflow.python.platform import test
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.all_util import make_all
# Import modules whose docstrings contribute, for use by remove_undocumented
# below.
from tensorflow.python.client import client_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import framework_lib
from tensorflow.python.framework import subscribe
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix as confusion_matrix_m
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import session_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib, expose through
# documentation, or remove.
_allowed_symbols = [
'AttrValue',
'ConfigProto',
'ClusterDef',
'DeviceSpec',
'Event',
'GPUOptions',
'GRAPH_DEF_VERSION',
'GRAPH_DEF_VERSION_MIN_CONSUMER',
'GRAPH_DEF_VERSION_MIN_PRODUCER',
'GraphDef',
'GraphOptions',
'HistogramProto',
'LogMessage',
'MetaGraphDef',
'NameAttrList',
'NodeDef',
'OptimizerOptions',
'RunOptions',
'RunMetadata',
'SessionLog',
'Summary',
'SummaryMetadata',
'TensorInfo', # Used for tf.saved_model functionality.
]
# The following symbols are kept for compatibility. It is our plan
# to remove them in the future.
_allowed_symbols.extend([
'arg_max',
'arg_min',
'mul', # use tf.multiply instead.
'neg', # use tf.negative instead.
'sub', # use tf.subtract instead.
'create_partitioned_variables',
'deserialize_many_sparse',
'lin_space',
'list_diff', # Use tf.listdiff instead.
'listdiff', # Use tf.listdiff instead.
'parse_single_sequence_example',
'serialize_many_sparse',
'serialize_sparse',
'sparse_matmul', ## use tf.matmul instead.
])
# This is needed temporarily because we import it explicitly.
_allowed_symbols.extend([
'pywrap_tensorflow',
])
# Dtypes exported by framework/dtypes.py.
# TODO(cwhipkey): expose these through documentation.
_allowed_symbols.extend([
'QUANTIZED_DTYPES',
'bfloat16',
'bool',
'complex64',
'complex128',
'double',
'half',
'float16',
'float32',
'float64',
'int16',
'int32',
'int64',
'int8',
'qint16',
'qint32',
'qint8',
'quint16',
'quint8',
'string',
'uint64',
'uint32',
'uint16',
'uint8',
'resource',
'variant',
])
# Export modules and constants.
_allowed_symbols.extend([
'app',
'bitwise',
'compat',
'data',
'distributions',
'errors',
'estimator',
'feature_column',
'flags',
'gfile',
'graph_util',
'image',
'initializers',
'keras',
'layers',
'linalg',
'logging',
'losses',
'metrics',
'newaxis',
'nn',
'profiler',
'python_io',
'resource_loader',
'saved_model',
'sets',
'spectral',
'summary',
'sysconfig',
'test',
'train',
'user_ops',
])
# Variables framework.versions:
_allowed_symbols.extend([
'VERSION',
'GIT_VERSION',
'COMPILER_VERSION',
'CXX11_ABI_FLAG',
'MONOLITHIC_BUILD',
])
# Remove all extra symbols that don't have a docstring or are not explicitly
# referenced in the whitelist.
remove_undocumented(__name__, _allowed_symbols, [
framework_lib, array_ops, check_ops, client_lib, compat, constant_op,
control_flow_ops, confusion_matrix_m, data, distributions,
functional_ops, histogram_ops, io_ops, keras, layers,
losses, math_ops, metrics, nn, profiler, resource_loader, sets, script_ops,
session_ops, sparse_ops, state_ops, string_ops, summary, tensor_array_ops,
train
])
# Special dunders that we choose to export:
_exported_dunders = set([
'__version__',
'__git_version__',
'__compiler_version__',
'__cxx11_abi_flag__',
'__monolithic_build__',
])
# Expose symbols minus dunders, unless they are whitelisted above.
# This is necessary to export our dunders.
__all__ = [s for s in dir() if s in _exported_dunders or not s.startswith('_')]
| {
"content_hash": "d7190910340665d1bde42ca564c9af28",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 81,
"avg_line_length": 30.140794223826713,
"alnum_prop": 0.735896514552641,
"repo_name": "lakshayg/tensorflow",
"id": "bc9ddec2a54a784027120828e9b15a2bf500414e",
"size": "9038",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/python/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "199316"
},
{
"name": "C++",
"bytes": "30640525"
},
{
"name": "CMake",
"bytes": "647767"
},
{
"name": "Go",
"bytes": "993982"
},
{
"name": "Java",
"bytes": "439699"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38167"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "7546"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "28170288"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "380226"
}
],
"symlink_target": ""
} |
from . import TGBot
import argparse
def build_parser():
parser = argparse.ArgumentParser(description='Run your own Telegram bot.')
parser.add_argument('plugins', metavar='plugin', nargs='*',
help='a subclass of TGPluginBase (ex: plugins.echo.EchoPlugin)')
parser.add_argument('--token', '-t', dest='token',
help='bot token provided by @BotFather')
parser.add_argument('--nocommand', '-n', dest='nocmd',
help='plugin.method to be used for non-command messages')
parser.add_argument('--polling', '-p', dest='polling', type=int, default=2,
help='interval (in seconds) to check for message updates')
parser.add_argument('--db_url', '-d', dest='db_url',
help='URL for database (default is in-memory sqlite)')
parser.add_argument('--listcommands', '-l', dest='list', action='store_const',
const=True, default=False,
                        help='list the commands provided by the loaded plugins and exit')
parser.add_argument('--webhook', '-w', dest='webhook', nargs=2, metavar=('hook_url', 'port'),
help='use webhooks (instead of polling) - requires bottle')
return parser
def import_class(cl):
d = cl.rfind(".")
class_name = cl[d + 1:len(cl)]
m = __import__(cl[0:d], globals(), locals(), [class_name])
return getattr(m, class_name)
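# Illustrative note (added): import_class turns a dotted path into the class
# object itself, e.g.
#   import_class('collections.OrderedDict')  # -> <class 'collections.OrderedDict'>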
def main():
from requests.packages import urllib3
urllib3.disable_warnings()
parser = build_parser()
args = parser.parse_args()
plugins = []
try:
for plugin_name in args.plugins:
cl = import_class(plugin_name)
plugins.append(cl())
nocmd = None
if args.nocmd is not None:
cl = import_class(args.nocmd)
nocmd = cl()
except Exception as e:
parser.error(e.message)
tg = TGBot(args.token, plugins=plugins, no_command=nocmd, db_url=args.db_url)
if args.list:
tg.print_commands()
return
if args.token is None:
parser.error('--token is required')
if args.webhook is None:
tg.run(polling_time=args.polling)
else:
tg.run_web(args.webhook[0], host='0.0.0.0', port=int(args.webhook[1]))
if __name__ == '__main__':
main()
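# Example invocation (illustrative; the token is a placeholder):
#   python -m tgbot plugins.echo.EchoPlugin --token 123456:ABCDEF --polling 2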
| {
"content_hash": "92b25b4254970d9f0e3fb88ed2ee3de5",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 125,
"avg_line_length": 34.507246376811594,
"alnum_prop": 0.5833683326333473,
"repo_name": "pmpfl/tgbotplug",
"id": "c5f4d9a66d853034b1530a83239bb2c225488e8b",
"size": "2403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tgbot/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32260"
}
],
"symlink_target": ""
} |
from base_test import BaseTestCase
from pydyty.monitor import Monitored
class MonitorTestCase(BaseTestCase):
def test_monitor(self):
class A(Monitored):
def foo(self, x):
return x
# This doesn't work yet.
def bar(self, x, y):
return x + y
a = A()
self.assertEqual(1, a.foo(1))
self.assertEqual('[foo: (int) -> int]',
str(A.__pydyty_type__))
self.assertEqual(2, a.bar(1, 1))
self.assertEqual("[bar: (int, int) -> int, foo: (int) -> int]",
str(A.__pydyty_type__))
| {
"content_hash": "0f1e65bd8ffef68d490811cc98dcf5dd",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 27.73913043478261,
"alnum_prop": 0.493730407523511,
"repo_name": "davidan42/pydyty",
"id": "5ba06e51725fbcc86da6c05fe60f5d8534c2b8dc",
"size": "638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_monitor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38267"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from django_countries.data import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {v.lower(): k for k, v in COUNTRIES.iteritems()}
        # Special cases (lowercase keys so they match the lowercased lookup below)
        country_lookup["usa"] = country_lookup["united states"]
        country_lookup["california"] = country_lookup["united states"]
        country_lookup["wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
if domain.deployment._doc.get('countries', None):
continue
try:
country = None
if domain.deployment._doc.get('country', None):
country = domain.deployment._doc['country']
elif domain._doc.get('country', None):
country = domain._doc['country']
if country:
if ',' in country:
countries = country.split(',')
elif ' and ' in country:
countries = country.split(' and ')
else:
countries = [country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s" % e
| {
"content_hash": "9068c29d7bbeb04ed6e96cb4e3505d73",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 87,
"avg_line_length": 39.354166666666664,
"alnum_prop": 0.5224986765484383,
"repo_name": "qedsoftware/commcare-hq",
"id": "7d1023e2fc6b250ac103b14b859be460da613b3a",
"size": "1889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/domain/management/commands/migrate_domain_countries.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import logging
from .const import Constant
FILE_NAME = Constant.user_log_file
if not os.path.isdir(Constant.user_conf_dir):
os.mkdir(Constant.user_conf_dir)
if not os.path.isdir(Constant.user_backend_dir):
os.mkdir(Constant.user_backend_dir)
with open(FILE_NAME, 'a+') as f:
f.write('#' * 80)
f.write('\n')
def getLogger(name):
log = logging.getLogger(name)
log.setLevel(logging.DEBUG)
# File output handler
fh = logging.FileHandler(FILE_NAME)
fh.setLevel(logging.DEBUG)
fh.setFormatter(logging.Formatter(
'%(asctime)s - %(levelname)s - %(name)s:%(lineno)s: %(message)s')) # NOQA
log.addHandler(fh)
return log
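# Minimal usage sketch (added for illustration):
#   log = getLogger(__name__)
#   log.debug('starting check')
#   log.warning('upstream returned status %s', 503)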
| {
"content_hash": "044e6150574cb367e616e3d408632088",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 82,
"avg_line_length": 23.885714285714286,
"alnum_prop": 0.6854066985645934,
"repo_name": "1dot75cm/repo-checker",
"id": "5df7ac75a79160b40674f1f65317357180523c67",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checker/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "440"
},
{
"name": "Python",
"bytes": "116253"
}
],
"symlink_target": ""
} |
"""Django settings for Pontoon."""
import os
import socket
from django.utils.functional import lazy
import dj_database_url
_dirname = os.path.dirname
ROOT = _dirname(_dirname(_dirname(os.path.abspath(__file__))))
def path(*args):
return os.path.join(ROOT, *args)
# Environment-dependent settings. These are loaded from environment
# variables.
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ['SECRET_KEY']
# Is this a dev instance?
DEV = os.environ.get('DJANGO_DEV', 'False') != 'False'
DEBUG = os.environ.get('DJANGO_DEBUG', 'False') != 'False'
HEROKU_DEMO = os.environ.get('HEROKU_DEMO', 'False') != 'False'
DJANGO_LOGIN = os.environ.get('DJANGO_LOGIN', 'False') != 'False' or HEROKU_DEMO
ADMINS = MANAGERS = (
(os.environ.get('ADMIN_NAME', ''),
os.environ.get('ADMIN_EMAIL', '')),
)
# A list of project manager email addresses to send project requests to
PROJECT_MANAGERS = os.environ.get('PROJECT_MANAGERS', '').split(',')
DATABASES = {
'default': dj_database_url.config(default='mysql://root@localhost/pontoon')
}
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.environ.get('STATIC_ROOT', path('static'))
# Optional CDN hostname for static files, e.g. '//asdf.cloudfront.net'
STATIC_HOST = os.environ.get('STATIC_HOST', '')
SESSION_COOKIE_HTTPONLY = os.environ.get('SESSION_COOKIE_HTTPONLY', 'True') != 'False'
SESSION_COOKIE_SECURE = os.environ.get('SESSION_COOKIE_SECURE', 'True') != 'False'
APP_URL_KEY = 'APP_URL'
# For the sake of integration with Heroku, we dynamically load the domain name
# from the Site object that is set right after the build phase.
if os.environ.get('HEROKU_DEMO') and not os.environ.get('SITE_URL'):
def _site_url():
from django.contrib.sites.models import Site
from django.core.cache import cache
app_url = cache.get(APP_URL_KEY)
        # Sometimes data from the cache is flushed; we can't do anything about that.
if not app_url:
app_url = "https://{}".format(Site.objects.get(pk=1).domain)
cache.set(APP_URL_KEY, app_url)
return app_url
SITE_URL = lazy(_site_url, str)()
else:
SITE_URL = os.environ.get('SITE_URL', 'http://localhost:8000')
# Custom LD_LIBRARY_PATH environment variable for SVN
SVN_LD_LIBRARY_PATH = os.environ.get('SVN_LD_LIBRARY_PATH', '')
# Disable forced SSL if debug mode is enabled or if CI is running the
# tests.
SSLIFY_DISABLE = DEBUG or os.environ.get('CI', False)
# URL to the RabbitMQ server
BROKER_URL = os.environ.get('RABBITMQ_URL', None)
# Microsoft Translator API Key
MICROSOFT_TRANSLATOR_API_KEY = os.environ.get('MICROSOFT_TRANSLATOR_API_KEY', '')
# Google Analytics Key
GOOGLE_ANALYTICS_KEY = os.environ.get('GOOGLE_ANALYTICS_KEY', '')
# Raygun.io configuration
RAYGUN4PY_CONFIG = {
'api_key': os.environ.get('RAYGUN_APIKEY', '')
}
# Email settings
EMAIL_HOST_USER = os.environ.get('SENDGRID_USERNAME', '')
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_PASSWORD = os.environ.get('SENDGRID_PASSWORD', '')
# Log emails to console if the SendGrid credentials are missing.
if EMAIL_HOST_USER and EMAIL_HOST_PASSWORD:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
else:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Environment-independent settings. These shouldn't have to change
# between server environments.
ROOT_URLCONF = 'pontoon.urls'
INSTALLED_APPS = (
'pontoon.administration',
'pontoon.base',
'pontoon.contributors',
'pontoon.intro',
'pontoon.localizations',
'pontoon.machinery',
'pontoon.projects',
'pontoon.sync',
'pontoon.teams',
# Django contrib apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
# Django sites app is required by django-allauth
'django.contrib.sites',
# Third-party apps, patches, fixes
'commonware.response.cookies',
'django_jinja',
'django_nose',
'pipeline',
'session_csrf',
'guardian',
'corsheaders',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.fxa',
)
BLOCKED_IPS = os.environ.get('BLOCKED_IPS', '').split(',')
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'sslify.middleware.SSLifyMiddleware',
'pontoon.base.middleware.RaygunExceptionMiddleware',
'pontoon.base.middleware.BlockedIpMiddleware',
'pontoon.base.middleware.HerokuDemoSetupMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'session_csrf.CsrfMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'csp.middleware.CSPMiddleware',
)
CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'session_csrf.context_processor',
'django.contrib.messages.context_processors.messages',
'pontoon.base.context_processors.globals',
)
TEMPLATES = [
{
'BACKEND': 'django_jinja.backend.Jinja2',
'APP_DIRS': True,
'OPTIONS': {
'match_extension': '',
'match_regex': r'^(?!(admin|registration|account|socialaccount)/).*\.(html|jinja|js)$',
'context_processors': CONTEXT_PROCESSORS,
'extensions': [
'jinja2.ext.do',
'jinja2.ext.loopcontrols',
'jinja2.ext.with_',
'jinja2.ext.i18n',
'jinja2.ext.autoescape',
'django_jinja.builtins.extensions.CsrfExtension',
'django_jinja.builtins.extensions.CacheExtension',
'django_jinja.builtins.extensions.TimezoneExtension',
'django_jinja.builtins.extensions.UrlsExtension',
'django_jinja.builtins.extensions.StaticFilesExtension',
'django_jinja.builtins.extensions.DjangoFiltersExtension',
'pipeline.templatetags.ext.PipelineExtension',
],
}
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [path('pontoon/base/templates/django')],
'OPTIONS': {
'debug': DEBUG,
'context_processors': CONTEXT_PROCESSORS,
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
}
},
]
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
'guardian.backends.ObjectPermissionBackend',
]
# This variable is required by django-guardian.
# App supports giving permissions for anonymous users.
ANONYMOUS_USER_ID = -1
GUARDIAN_RAISE_403 = True
PIPELINE_COMPILERS = (
'pipeline.compilers.es6.ES6Compiler',
)
PIPELINE_YUGLIFY_BINARY = path('node_modules/.bin/yuglify')
PIPELINE_BABEL_BINARY = path('node_modules/.bin/babel')
PIPELINE_BABEL_ARGUMENTS = '--modules ignore'
PIPELINE_DISABLE_WRAPPER = True
PIPELINE_CSS = {
'base': {
'source_filenames': (
'css/font-awesome.css',
'css/nprogress.css',
'css/style.css',
),
'output_filename': 'css/base.min.css',
},
'admin': {
'source_filenames': (
'css/table.css',
'css/admin.css',
),
'output_filename': 'css/admin.min.css',
},
'admin_project': {
'source_filenames': (
'css/multiple_locale_selector.css',
'css/admin_project.css',
),
'output_filename': 'css/admin_project.min.css',
},
'project': {
'source_filenames': (
'css/table.css',
'css/contributors.css',
'css/heading_info.css',
),
'output_filename': 'css/project.min.css',
},
'localization': {
'source_filenames': (
'css/table.css',
'css/contributors.css',
'css/heading_info.css',
),
'output_filename': 'css/localization.min.css',
},
'projects': {
'source_filenames': (
'css/heading_info.css',
'css/table.css',
),
'output_filename': 'css/projects.min.css',
},
'team': {
'source_filenames': (
'css/table.css',
'css/contributors.css',
'css/heading_info.css',
'css/team.css',
),
'output_filename': 'css/team.min.css',
},
'teams': {
'source_filenames': (
'css/heading_info.css',
'css/table.css',
),
'output_filename': 'css/teams.min.css',
},
'sync_logs': {
'source_filenames': (
'css/sync_logs.css',
),
'output_filename': 'css/sync_logs.min.css',
},
'translate': {
'source_filenames': (
'css/jquery-ui.css',
'css/jquery-ui-timepicker-addon.css',
'css/translate.css',
),
'output_filename': 'css/translate.min.css',
},
'profile': {
'source_filenames': (
'css/contributor.css',
'css/profile.css',
),
'output_filename': 'css/profile.min.css',
},
'settings': {
'source_filenames': (
'css/multiple_locale_selector.css',
'css/contributor.css',
'css/settings.css',
),
'output_filename': 'css/settings.min.css',
},
'machinery': {
'source_filenames': (
'css/machinery.css',
),
'output_filename': 'css/machinery.min.css',
},
'contributors': {
'source_filenames': (
'css/heading_info.css',
'css/contributors.css',
),
'output_filename': 'css/contributors.min.css',
},
'intro': {
'source_filenames': (
'css/bootstrap.min.css',
'css/agency.css',
),
'output_filename': 'css/intro.min.css',
},
'terms': {
'source_filenames': (
'css/terms.css',
),
'output_filename': 'css/terms.min.css',
},
}
PIPELINE_JS = {
'base': {
'source_filenames': (
'js/lib/jquery-1.11.1.min.js',
'js/lib/jquery.timeago.js',
'js/lib/nprogress.js',
'js/main.js',
),
'output_filename': 'js/base.min.js',
},
'admin': {
'source_filenames': (
'js/table.js',
),
'output_filename': 'js/admin.min.js',
},
'admin_project': {
'source_filenames': (
'js/lib/jquery-ui.js',
'js/multiple_locale_selector.js',
'js/admin_project.js',
),
'output_filename': 'js/admin_project.min.js',
},
'localization': {
'source_filenames': (
'js/table.js',
'js/progress-chart.js',
'js/tabs.js',
),
'output_filename': 'js/localization.min.js',
},
'project': {
'source_filenames': (
'js/table.js',
'js/progress-chart.js',
'js/tabs.js',
),
'output_filename': 'js/project.min.js',
},
'projects': {
'source_filenames': (
'js/table.js',
'js/progress-chart.js',
),
'output_filename': 'js/projects.min.js',
},
'team': {
'source_filenames': (
'js/table.js',
'js/progress-chart.js',
'js/bugzilla.js',
'js/tabs.js',
'js/request_projects.js',
'js/permissions.js',
'js/team.js',
),
'output_filename': 'js/team.min.js',
},
'teams': {
'source_filenames': (
'js/table.js',
'js/progress-chart.js',
),
'output_filename': 'js/teams.min.js',
},
'translate': {
'source_filenames': (
'js/lib/jquery-ui.js',
'js/lib/jquery-ui-timepicker-addon.js',
'js/lib/jquery.mark.js',
'js/lib/highstock.js',
'js/lib/diff.js',
'js/translate.js',
),
'output_filename': 'js/translate.min.js',
},
'profile': {
'source_filenames': (
'js/contributor.js',
),
'output_filename': 'js/profile.min.js',
},
'settings': {
'source_filenames': (
'js/lib/jquery-ui.js',
'js/multiple_locale_selector.js',
'js/contributor.js',
),
'output_filename': 'js/settings.min.js',
},
'machinery': {
'source_filenames': (
'js/lib/clipboard.min.js',
'js/machinery.js',
),
'output_filename': 'js/machinery.min.js',
},
}
# Cache config
# If the environment contains configuration data for Memcached, use
# PyLibMC for the cache backend. Otherwise, default to an in-memory
# cache.
if os.environ.get('MEMCACHE_SERVERS') is not None:
CACHES = {
'default': {
'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',
'TIMEOUT': 500,
'BINARY': True,
'OPTIONS': {}
}
}
else:
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'pontoon'
}
}
# Site ID is used by Django's Sites framework.
SITE_ID = 1
## Media and templates.
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = path('media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = STATIC_HOST + '/static/'
STATICFILES_STORAGE = 'pontoon.base.storage.GzipManifestPipelineStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# Set ALLOWED_HOSTS based on SITE_URL setting.
def _allowed_hosts():
from django.conf import settings
from urlparse import urlparse
host = urlparse(settings.SITE_URL).netloc # Remove protocol and path
host = host.rsplit(':', 1)[0] # Remove port
return [host]
ALLOWED_HOSTS = lazy(_allowed_hosts, list)()
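# For illustration (added): with SITE_URL = 'https://pontoon.example.com:8443',
# _allowed_hosts() evaluates to ['pontoon.example.com'].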
## Auth
# The first hasher in this list will be used for new passwords.
# Any other hasher in the list can be used for existing passwords.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
)
## Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'formatters': {
'verbose': {
'format': '[%(levelname)s:%(name)s] %(asctime)s %(message)s'
},
},
'loggers': {
'django': {
'handlers': ['console'],
},
'pontoon': {
'handlers': ['console'],
'level': os.environ.get('DJANGO_LOG_LEVEL', 'INFO'),
},
}
}
if DEBUG:
LOGGING['handlers']['console']['formatter'] = 'verbose'
if os.environ.get('DJANGO_SQL_LOG', False):
LOGGING['loggers']['django.db.backends'] = {
'level': 'DEBUG',
'handlers': ['console']
}
## Tests
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--logging-filter=-factory,-django.db,-raygun4py',
'--logging-clear-handlers']
# Disable nose-progressive on CI due to ugly output.
if not os.environ.get('CI', False):
NOSE_ARGS.append('--with-progressive')
# General auth settings
LOGIN_URL = '/'
LOGIN_REDIRECT_URL = '/'
LOGIN_REDIRECT_URL_FAILURE = '/'
# Should robots.txt deny everything or disallow a calculated list of
# URLs we don't want to be crawled? Default is false, disallow
# everything.
ENGAGE_ROBOTS = False
# Always generate a CSRF token for anonymous users.
ANON_ALWAYS = True
# Set X-Frame-Options to DENY by default on all responses.
X_FRAME_OPTIONS = 'DENY'
# Use correct header for detecting HTTPS on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Strict-Transport-Security: max-age=63072000
# Ensures users only visit the site over HTTPS
SECURE_HSTS_SECONDS = 63072000
# X-Content-Type-Options: nosniff
# Disables browser MIME type sniffing
SECURE_CONTENT_TYPE_NOSNIFF = True
# x-xss-protection: 1; mode=block
# Activates the browser's XSS filtering and helps prevent XSS attacks
SECURE_BROWSER_XSS_FILTER = True
# Content-Security-Policy headers
CSP_DEFAULT_SRC = ("'none'",)
CSP_CHILD_SRC = ("https:",)
CSP_FRAME_SRC = ("https:",) # Older browsers
CSP_CONNECT_SRC = (
"'self'",
"https://bugzilla.mozilla.org/rest/bug",
)
CSP_FONT_SRC = ("'self'",)
CSP_IMG_SRC = (
"'self'",
"https://*.wp.com/pontoon.mozilla.org/",
"https://ssl.google-analytics.com",
"https://www.gravatar.com/avatar/",
)
CSP_SCRIPT_SRC = (
"'self'",
"'unsafe-eval'",
"'sha256-x3niK4UU+vG6EGT2NK2rwi2j/etQodJd840oRpEnqd4='",
"'sha256-fDsgbzHC0sNuBdM4W91nXVccgFLwIDkl197QEca/Cl4='",
"https://ssl.google-analytics.com/ga.js",
)
CSP_STYLE_SRC = ("'self'", "'unsafe-inline'",)
# Needed if site not hosted on HTTPS domains (like local setup)
if not (HEROKU_DEMO or SITE_URL.startswith('https')):
CSP_IMG_SRC = CSP_IMG_SRC + ("http://www.gravatar.com/avatar/",)
CSP_CHILD_SRC = CSP_FRAME_SRC = CSP_FRAME_SRC + ("http:",)
# For absolute urls
try:
DOMAIN = socket.gethostname()
except socket.error:
DOMAIN = 'localhost'
PROTOCOL = "http://"
PORT = 80
# Names for slave databases from the DATABASES setting.
SLAVE_DATABASES = []
## Internationalization.
# Enable timezone-aware datetimes.
USE_TZ = True
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = os.environ.get('TZ', 'UTC')
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
# Microsoft Translator Locales
MICROSOFT_TRANSLATOR_LOCALES = [
'ar', 'bs-Latn', 'bg', 'ca', 'zh-CHS', 'zh-CHT', 'hr', 'cs', 'da', 'nl',
'en', 'et', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'id',
'it', 'ja', 'tlh', 'tlh-Qaak', 'ko', 'lv', 'lt', 'ms', 'mt', 'yua', 'no',
'otq', 'fa', 'pl', 'pt', 'ro', 'ru', 'sr-Cyrl', 'sr-Latn', 'sk', 'sl',
'es', 'sv', 'th', 'tr', 'uk', 'ur', 'vi', 'cy'
]
# Microsoft Terminology Service API Locales
MICROSOFT_TERMINOLOGY_LOCALES = [
'af-za', 'am-et', 'ar-eg', 'ar-sa', 'as-in', 'az-latn-az', 'be-by',
'bg-bg', 'bn-bd', 'bn-in', 'bs-cyrl-ba', 'bs-latn-ba', 'ca-es',
'ca-es-valencia', 'chr-cher-us', 'cs-cz', 'cy-gb', 'da-dk', 'de-at',
'de-ch', 'de-de', 'el-gr', 'en-au', 'en-ca', 'en-gb', 'en-ie', 'en-my',
'en-nz', 'en-ph', 'en-sg', 'en-us', 'en-za', 'es-es', 'es-mx', 'es-us',
'et-ee', 'eu-es', 'fa-ir', 'fi-fi', 'fil-ph', 'fr-be', 'fr-ca', 'fr-ch',
'fr-fr', 'fr-lu', 'fuc-latn-sn', 'ga-ie', 'gd-gb', 'gl-es', 'gu-in',
'guc-ve', 'ha-latn-ng', 'he-il', 'hi-in', 'hr-hr', 'hu-hu', 'hy-am',
'id-id', 'ig-ng', 'is-is', 'it-ch', 'it-it', 'iu-latn-ca', 'ja-jp',
'ka-ge', 'kk-kz', 'km-kh', 'kn-in', 'ko-kr', 'kok-in', 'ku-arab-iq',
'ky-kg', 'lb-lu', 'lo-la', 'lt-lt', 'lv-lv', 'mi-nz', 'mk-mk', 'ml-in',
'mn-mn', 'mr-in', 'ms-bn', 'ms-my', 'mt-mt', 'nb-no', 'ne-np', 'nl-be',
'nl-nl', 'nn-no', 'nso-za', 'or-in', 'pa-arab-pk', 'pa-in', 'pl-pl',
'prs-af', 'ps-af', 'pt-br', 'pt-pt', 'qut-gt', 'quz-pe', 'rm-ch', 'ro-ro',
'ru-ru', 'rw-rw', 'sd-arab-pk', 'si-lk', 'sk-sk', 'sl-si', 'sp-xl',
'sq-al', 'sr-cyrl-ba', 'sr-cyrl-rs', 'sr-latn-rs', 'sv-se', 'sw-ke',
'ta-in', 'te-in', 'tg-cyrl-tj', 'th-th', 'ti-et', 'tk-tm', 'tl-ph',
'tn-za', 'tr-tr', 'tt-ru', 'ug-cn', 'uk-ua', 'ur-pk', 'uz-latn-uz',
'vi-vn', 'wo-sn', 'xh-za', 'yo-ng', 'zh-cn', 'zh-hk', 'zh-tw', 'zu-za',
]
# Contributors to exclude from Top Contributors list
EXCLUDE = os.environ.get('EXCLUDE', '').split(',')
SYNC_TASK_TIMEOUT = 60 * 60 * 1 # 1 hour
SYNC_LOG_RETENTION = 90 # days
MANUAL_SYNC = os.environ.get('MANUAL_SYNC', 'False') != 'False'
# Celery
# Execute celery tasks locally instead of in a worker unless the
# environment is configured.
CELERY_ALWAYS_EAGER = os.environ.get('CELERY_ALWAYS_EAGER', 'True') != 'False'
# Limit the number of tasks a celery worker can handle before being replaced.
try:
CELERYD_MAX_TASKS_PER_CHILD = int(os.environ.get('CELERYD_MAX_TASKS_PER_CHILD', ''))
except ValueError:
CELERYD_MAX_TASKS_PER_CHILD = 20
BROKER_POOL_LIMIT = 1 # Limit to one connection per worker
BROKER_CONNECTION_TIMEOUT = 30 # Give up connecting faster
CELERY_RESULT_BACKEND = None # We don't store results
CELERY_SEND_EVENTS = False # We aren't yet monitoring events
# Settings related to the CORS mechanisms.
# For the sake of integration with other sites,
# some of javascript files (e.g. pontoon.js)
# require Access-Control-Allow-Origin header to be set as '*'.
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r'^/pontoon\.js$'
SOCIALACCOUNT_ENABLED = True
SOCIALACCOUNT_ADAPTER = 'pontoon.base.adapter.PontoonSocialAdapter'
def account_username(user):
return user.name_or_email
ACCOUNT_AUTHENTICATED_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'none'
ACCOUNT_USER_DISPLAY = account_username
# Firefox Accounts
FXA_CLIENT_ID = os.environ.get('FXA_CLIENT_ID', '')
FXA_SECRET_KEY = os.environ.get('FXA_SECRET_KEY', '')
FXA_OAUTH_ENDPOINT = os.environ.get('FXA_OAUTH_ENDPOINT', '')
FXA_PROFILE_ENDPOINT = os.environ.get('FXA_PROFILE_ENDPOINT', '')
FXA_SCOPE = ['profile:uid', 'profile:display_name', 'profile:email']
# All settings related to the AllAuth
SOCIALACCOUNT_PROVIDERS = {
'fxa': {
'SCOPE': FXA_SCOPE,
'OAUTH_ENDPOINT': FXA_OAUTH_ENDPOINT,
'PROFILE_ENDPOINT': FXA_PROFILE_ENDPOINT,
}
}
# Define all trusted origins that will be returned in the pontoon.js file.
if os.environ.get('JS_TRUSTED_ORIGINS'):
JS_TRUSTED_ORIGINS = os.environ.get('JS_TRUSTED_ORIGINS').split(',')
else:
JS_TRUSTED_ORIGINS = [
SITE_URL,
]
| {
"content_hash": "3b1c05b51041a1bb2ecb4b81b608217c",
"timestamp": "",
"source": "github",
"line_count": 765,
"max_line_length": 99,
"avg_line_length": 31.08888888888889,
"alnum_prop": 0.6080393558424084,
"repo_name": "participedia/pontoon",
"id": "5b25785ff2e02d76ae225057d8de26b8a21db6f4",
"size": "23783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pontoon/settings/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "90764"
},
{
"name": "HTML",
"bytes": "116141"
},
{
"name": "JavaScript",
"bytes": "319656"
},
{
"name": "Python",
"bytes": "726046"
},
{
"name": "Shell",
"bytes": "1360"
}
],
"symlink_target": ""
} |
from tempest_lib import decorators
import testtools
from tempest.api.telemetry import base
from tempest import config
from tempest import test
CONF = config.CONF
class TelemetryNotificationAPITestJSON(base.BaseTelemetryTest):
_interface = 'json'
@classmethod
def resource_setup(cls):
if CONF.telemetry.too_slow_to_test:
raise cls.skipException("Ceilometer feature for fast work mysql "
"is disabled")
super(TelemetryNotificationAPITestJSON, cls).resource_setup()
@test.attr(type="gate")
@testtools.skipIf(not CONF.service_available.nova,
"Nova is not available.")
def test_check_nova_notification(self):
resp, body = self.create_server()
self.assertEqual(resp.status, 202)
query = ('resource', 'eq', body['id'])
for metric in self.nova_notifications:
self.await_samples(metric, query)
@test.attr(type="smoke")
@test.services("image")
@testtools.skipIf(not CONF.image_feature_enabled.api_v1,
"Glance api v1 is disabled")
@decorators.skip_because(bug='1351627')
def test_check_glance_v1_notifications(self):
body = self.create_image(self.image_client)
self.image_client.update_image(body['id'], data='data')
query = 'resource', 'eq', body['id']
self.image_client.delete_image(body['id'])
for metric in self.glance_notifications:
self.await_samples(metric, query)
@test.attr(type="smoke")
@test.services("image")
@testtools.skipIf(not CONF.image_feature_enabled.api_v2,
"Glance api v2 is disabled")
@decorators.skip_because(bug='1351627')
def test_check_glance_v2_notifications(self):
body = self.create_image(self.image_client_v2)
self.image_client_v2.store_image(body['id'], "file")
self.image_client_v2.get_image_file(body['id'])
query = 'resource', 'eq', body['id']
for metric in self.glance_v2_notifications:
self.await_samples(metric, query)
| {
"content_hash": "cafb515ce4e70c00a08c521a2694bc8f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 77,
"avg_line_length": 32.875,
"alnum_prop": 0.6368821292775665,
"repo_name": "CiscoSystems/tempest",
"id": "7e5d6eecf3b6453a6488b6a06dc6eae1e9ecfef7",
"size": "2677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/telemetry/test_telemetry_notification_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "695"
},
{
"name": "Python",
"bytes": "2806255"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
} |
import os
import sys
import requests
from getpass import getpass
# Using this method of secret storing:
# https://stackoverflow.com/questions/25501403/storing-the-secrets-passwords-in-a-separate-file
try:
import credentials
token = credentials.API_key
except Exception as e:
print (e)
token = getpass()
credentials_file = open("credentials.py", "w")
credentials_file.writelines("API_key = \"" + token +"\"")
    credentials_file.close()
API_URL = 'https://www.muckrock.com/api_v1/'
def get_api_key():
return token
def get_headers(token=None):
if token:
return {
'Authorization': 'Token %s' % token,
'content-type': 'application/json'
}
else:
return {'content-type': 'application/json'}
def display_progress(current, total):
    percent = (current / float(total)) * 100.0  # float() avoids integer division under Python 2
sys.stdout.write("\r%d%%" % percent)
sys.stdout.flush()
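# Example of combining these helpers (added for illustration; the 'foia/'
# endpoint is assumed from the public MuckRock API, not defined in this file):
#   response = requests.get(API_URL + 'foia/', headers=get_headers(get_api_key()))
#   display_progress(1, max(response.json().get('count', 1), 1))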
| {
"content_hash": "f167e874226bc6b4279ec2c664364e43",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 95,
"avg_line_length": 24.81081081081081,
"alnum_prop": 0.6492374727668845,
"repo_name": "MuckRock/API-examples",
"id": "3ff4a69c639fa7428f5dd840f2b80b9be439912d",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73425"
}
],
"symlink_target": ""
} |
"""Support for Modbus Coil sensors."""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.modbus import (
CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN)
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_SLAVE
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_COIL = 'coil'
CONF_COILS = 'coils'
DEPENDENCIES = ['modbus']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COILS): [{
vol.Required(CONF_COIL): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Optional(CONF_SLAVE): cv.positive_int,
}]
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Modbus binary sensors."""
sensors = []
for coil in config.get(CONF_COILS):
hub = hass.data[MODBUS_DOMAIN][coil.get(CONF_HUB)]
sensors.append(ModbusCoilSensor(
hub, coil.get(CONF_NAME), coil.get(CONF_SLAVE),
coil.get(CONF_COIL)))
add_entities(sensors)
class ModbusCoilSensor(BinarySensorDevice):
"""Modbus coil sensor."""
def __init__(self, hub, name, slave, coil):
"""Initialize the Modbus coil sensor."""
self._hub = hub
self._name = name
self._slave = int(slave) if slave else None
self._coil = int(coil)
self._value = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._value
def update(self):
"""Update the state of the sensor."""
result = self._hub.read_coils(self._slave, self._coil, 1)
try:
self._value = result.bits[0]
except AttributeError:
_LOGGER.error("No response from hub %s, slave %s, coil %s",
self._hub.name, self._slave, self._coil)
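# Illustrative configuration that this schema accepts (added; values are
# placeholders):
#
# binary_sensor:
#   - platform: modbus
#     coils:
#       - name: Pump running
#         hub: default
#         slave: 1
#         coil: 16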
| {
"content_hash": "5529058d5628beb55443263dc5569bf5",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 71,
"avg_line_length": 30.357142857142858,
"alnum_prop": 0.6367058823529411,
"repo_name": "nugget/home-assistant",
"id": "4e0ab74445d2f1b267c313450efe4ca704c185a0",
"size": "2125",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/modbus/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14492390"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17526"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/faction_perk/hq/shared_hq_s01.iff"
result.attribute_template_id = -1
result.stfName("faction_perk_n","hq_s01")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "806295f53795ac84f7c80c8c371b5955",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 70,
"avg_line_length": 23.23076923076923,
"alnum_prop": 0.6854304635761589,
"repo_name": "anhstudios/swganh",
"id": "b7d9666c4db1cd8c4c2523ccf3e4e057af6e870a",
"size": "447",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/building/faction_perk/hq/shared_hq_s01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from distutils.core import setup, Command
from unittest import TextTestRunner, TestLoader
import perlinpinpin
class TestCommand(Command):
description = "run unit tests"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
"""Run unit tests"""
tests = TestLoader().loadTestsFromName('tests')
t = TextTestRunner(verbosity=1)
t.run(tests)
setup(name='perlinpinpin',
version=perlinpinpin.__version__,
      description='Convert a French fuzzy date to a Python date object.',
      long_description="Convert French fuzzy dates like 'hier', 'il y a 1 semaine et 1 jour', 'mardi prochain', '4 Janvier', etc., to a date object.",
py_modules=['perlinpinpin'],
license='BSD License',
url='http://cyberdelia.github.com/perlinpinpin/',
keywords="convert fuzzy date time french",
classifiers=["Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules"],
author='Timothee Peignier',
author_email='tim@tryphon.org',
cmdclass={'test': TestCommand}
)
| {
"content_hash": "a6710db5150341621f3075309a8433a7",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 150,
"avg_line_length": 33.45,
"alnum_prop": 0.6270553064275037,
"repo_name": "cyberdelia/perlinpinpin",
"id": "70ce4dbe24c56347aee077cef0959bddad2afb8a",
"size": "1384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "32288"
}
],
"symlink_target": ""
} |
"""
FB Chatbot - Simple Reply Plugin
~~~~~~
based on triggers, return a response
"""
__author__ = 'Tim Chan'
__email__ = 'github@timc.me'
__copyright__ = 'Copyright 2020 by Tim Chan'
__version__ = '2.0'
__license__ = 'MIT'
import pickle
import settings.settings as config
import plugins.common as helper
class SimpleReply(object):
responses = {}
def __init__(self):
self.load_file()
helper.logmessage('Loaded SimpleReply Plugin')
def save_file(self):
with open('responses.dat', 'wb') as responsefile:
pickle.dump(self.responses, responsefile)
def load_file(self):
with open('responses.dat', 'rb') as responsefile:
try:
self.responses = pickle.loads(responsefile.read())
except EOFError:
self.responses = {}
def add_command(self, message):
try:
messagesplit = helper.parsemessage(message).split(' ')
command = str(messagesplit[0]).lower()
if command in self.responses:
return 'Command already exists! Delete the command first using !delcmd {}'.format(command)
response = str(" ".join(messagesplit[1:]))
self.responses[command] = response
self.save_file()
return '"{}" command added to return "{}"'.format(command, response)
except Exception as e:
helper.logmessage('Command Addition Error: {} | Message: {}'.format(str(e), message))
return 'Could not add command!'
def del_command(self, fbid, message):
if fbid in config.adminfbids:
command = str(helper.parsemessage(message).split(' ')[0]).lower()
if command in self.responses:
try:
del self.responses[command]
self.save_file()
return 'Successfully deleted command "{}"'.format(command)
except Exception as e:
helper.logmessage('Command Deletion Error: {} | Message: {}'.format(str(e), message))
return 'Could not delete command!'
else:
return 'Cannot delete what is not there!!'
else:
return 'NOOB U R NOT ADMIN'
def respond(self, command):
helper.logmessage('Simple Response Triggered')
command = command.translate(str.maketrans('', '', '!-:.^')).lower()
if command in self.responses:
return self.responses[command]
else:
return ''
def list_commands(self):
respstring = 'Custom Commands: !'
respstring += ", !".join(sorted(self.responses))
return respstring | {
"content_hash": "6d0d65a005bcc894dc89d715b436c49f",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 106,
"avg_line_length": 34.164556962025316,
"alnum_prop": 0.5694701741385698,
"repo_name": "TimLChan/FB-Altcoin-Stock-Chatbot",
"id": "36e7b4ff529090ef14f70ca7021c2dbc2308b036",
"size": "2746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/simplereply.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15295"
}
],
"symlink_target": ""
} |
__test__ = {"API_TESTS": r"""
>>> from django.conf import settings
>>> ORIGINAL_TIME_ZONE = settings.TIME_ZONE
>>> settings.TIME_ZONE = "UTC"
>>> from timezones import forms
# the default case where no timezone is given explicitly.
# uses settings.TIME_ZONE.
>>> f = forms.LocalizedDateTimeField()
>>> f.clean("2008-05-30 14:30:00")
datetime.datetime(2008, 5, 30, 14, 30, tzinfo=<UTC>)
# specify a timezone explicity. this may come from a UserProfile for example.
>>> f = forms.LocalizedDateTimeField(timezone="America/Denver")
>>> f.clean("2008-05-30 14:30:00")
datetime.datetime(2008, 5, 30, 20, 30, tzinfo=<UTC>)
>>> f = forms.TimeZoneField()
>>> f.clean('US/Eastern')
<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>
>>> settings.TIME_ZONE = ORIGINAL_TIME_ZONE
""",
"DECORATOR_TESTS": r"""
>>> from timezones import decorators
>>> from datetime import *
>>> class Foo(object):
... datetime = datetime(2008, 6, 20, 23, 58, 17)
... @decorators.localdatetime('datetime')
... def localdatetime(self):
... return 'Australia/Lindeman'
...
>>> foo = Foo()
>>> foo.datetime
datetime.datetime(2008, 6, 20, 23, 58, 17)
>>> foo.localdatetime
datetime.datetime(2008, 6, 21, 9, 58, 17, tzinfo=<DstTzInfo 'Australia/Lindeman' EST+10:00:00 STD>)
>>> foo.localdatetime = datetime(2008, 6, 12, 23, 50, 0)
>>> foo.datetime
datetime.datetime(2008, 6, 12, 13, 50, tzinfo=<UTC>)
>>> foo.localdatetime
datetime.datetime(2008, 6, 12, 23, 50, tzinfo=<DstTzInfo 'Australia/Lindeman' EST+10:00:00 STD>)
"""}
| {
"content_hash": "e0abd52e8e55c898153dfb546cd26f3b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 99,
"avg_line_length": 34.34090909090909,
"alnum_prop": 0.6724023825281271,
"repo_name": "indro/t2c",
"id": "eb7c87060f871a6453a9831294ee566533778c5a",
"size": "1512",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apps/external_apps/timezones/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "4084"
},
{
"name": "Assembly",
"bytes": "3294"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "C",
"bytes": "146718"
},
{
"name": "C#",
"bytes": "17611"
},
{
"name": "C++",
"bytes": "79372"
},
{
"name": "CSS",
"bytes": "165869"
},
{
"name": "Clojure",
"bytes": "21964"
},
{
"name": "Common Lisp",
"bytes": "48874"
},
{
"name": "D",
"bytes": "5475"
},
{
"name": "Dylan",
"bytes": "683"
},
{
"name": "Emacs Lisp",
"bytes": "126207"
},
{
"name": "Erlang",
"bytes": "8972"
},
{
"name": "FORTRAN",
"bytes": "27700"
},
{
"name": "Haskell",
"bytes": "40419"
},
{
"name": "Java",
"bytes": "81362"
},
{
"name": "JavaScript",
"bytes": "75388"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Matlab",
"bytes": "469"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "778"
},
{
"name": "PHP",
"bytes": "17078"
},
{
"name": "Pascal",
"bytes": "84519"
},
{
"name": "Perl",
"bytes": "37504"
},
{
"name": "Python",
"bytes": "8018145"
},
{
"name": "R",
"bytes": "3468"
},
{
"name": "Ruby",
"bytes": "91230"
},
{
"name": "Scala",
"bytes": "272"
},
{
"name": "Scheme",
"bytes": "45856"
},
{
"name": "Shell",
"bytes": "117254"
},
{
"name": "Smalltalk",
"bytes": "15501"
},
{
"name": "VimL",
"bytes": "16660"
},
{
"name": "Visual Basic",
"bytes": "846"
},
{
"name": "XSLT",
"bytes": "755"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(name="oauth2",
version="1.2.1",
description="Library for OAuth version 1.0a.",
author="Joe Stump",
author_email="joe@simplegeo.com",
url="http://github.com/simplegeo/python-oauth2",
packages = find_packages(),
install_requires = ['httplib2'],
license = "MIT License",
keywords="oauth",
zip_safe = True,
tests_require=['nose', 'coverage', 'mox'])
| {
"content_hash": "afe3374b6fab3b7df40325799e62748f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 54,
"avg_line_length": 33.07142857142857,
"alnum_prop": 0.6177105831533477,
"repo_name": "e-loue/python-oauth2",
"id": "b2f9f5e548f48568d5c3f633e750bec2587572c9",
"size": "519",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86224"
}
],
"symlink_target": ""
} |
"""Package for writing output files in HTML format.
This package is considered stable but it is not part of the public API.
"""
from .htmlfilewriter import HtmlFileWriter, ModelWriter
from .jsonwriter import JsonWriter
LOG = 'rebot/log.html'
REPORT = 'rebot/report.html'
LIBDOC = 'libdoc/libdoc.html'
TESTDOC = 'testdoc/testdoc.html'
| {
"content_hash": "880bf9ddc7647b927a71beb2836383d6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 71,
"avg_line_length": 28.083333333333332,
"alnum_prop": 0.7685459940652819,
"repo_name": "yamateh/robotframework",
"id": "5bceed5a3c3d2f1963276645ab2e364a31e9a9f0",
"size": "943",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/robot/htmldata/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from pprint import pprint
import json
import sys
from networks.docker import build_image
from networks.docker import build_network
from networks.docker import inspect_network
from networks.docker import create_container
from networks.docker import connect_to_network
from networks.docker import stop_container
from networks.docker import create_gateway
# docker run --rm -it -p 8022:22 vulnerables/cve-2016-6515
class Network():
metasploitable = build_image("metasploitable")
gateway = build_image("nginx")
openssh = build_image("openssh-vulnerable")
def __init__(self):
self.nodes = []
self.subnets = []
self.connections = []
def add_subnet(self, subnet_name):
build_network(subnet_name)
self.subnets.append(subnet_name)
return self
def add_node(self, node, command=""):
self.nodes.append(create_container(node, command))
return self
def add_gateway(self, node, external_port, internal_port, command="nginx -g \"daemon off;\""):
self.nodes.append(create_gateway(node, external_port, internal_port, command))
return self
def connect_to_subnet(self, node, subnet):
self.connections.append((node, subnet))
connect_to_network(subnet, node)
return self
def inspect_subnet(self, subnet):
if subnet in self.subnets:
pprint(inspect_network(subnet))
else:
print("Subnet not connected.")
return self
def stop_network(self):
for node in self.nodes:
stop_container(node)
return self
def export(self):
return json.dumps({
"nodes" : self.nodes,
"subnets" : self.subnets,
"connections" : self.connections
})
def default_network():
"""Creates Docker simulation."""
network = Network()
return network \
.add_node(Network.metasploitable) \
.add_node(Network.metasploitable) \
.add_gateway(Network.gateway, 3000, 80) \
.add_subnet("test_subnet") \
.connect_to_subnet(network.nodes[0], network.subnets[0]) \
.connect_to_subnet(network.nodes[1], network.subnets[0]) \
.connect_to_subnet(network.nodes[2], network.subnets[0]) \
.add_gateway(Network.openssh, 8022, 22, "") \
.connect_to_subnet(network.nodes[3], network.subnets[0])
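# Hypothetical usage sketch (added for illustration, not in the original
# module): build the default simulation, inspect its subnet, print the JSON
# topology and stop the containers again. Requires a running Docker daemon.
if __name__ == "__main__":
    demo = default_network()
    demo.inspect_subnet("test_subnet")
    print(demo.export())
    demo.stop_network()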
| {
"content_hash": "a787e779c3f1934ff05e22a97c78478e",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 98,
"avg_line_length": 32.189189189189186,
"alnum_prop": 0.6418975650713686,
"repo_name": "cyberImperial/attack-graphs",
"id": "26cc7da97f0e131a370c27fab83938f1f1fce6d7",
"size": "2382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networks/network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "9679"
},
{
"name": "CSS",
"bytes": "160730"
},
{
"name": "HTML",
"bytes": "6919"
},
{
"name": "JavaScript",
"bytes": "17359"
},
{
"name": "Makefile",
"bytes": "1249"
},
{
"name": "Python",
"bytes": "114556"
},
{
"name": "Shell",
"bytes": "1232"
}
],
"symlink_target": ""
} |
import pytest
import requests
import os
import keyring
from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.table import Table, Column
from astropy.io.votable import parse
from astropy.io.votable.exceptions import W03, W50
from astroquery import log
import numpy as np
from astroquery.casda import Casda
from astroquery.exceptions import LoginError
try:
from unittest.mock import Mock, MagicMock
except ImportError:
pytest.skip("Install mock for the casda tests.", allow_module_level=True)
DATA_FILES = {'CIRCLE': 'cone.xml', 'RANGE': 'box.xml', 'DATALINK': 'datalink.xml', 'RUN_JOB': 'run_job.xml',
'COMPLETED_JOB': 'completed_job.xml', 'DATALINK_NOACCESS': 'datalink_noaccess.xml',
'cutout_CIRCLE_333.9092_-45.8418_0.5000': 'cutout_333.9092_-45.8418_0.5000.xml',
'AVAILABILITY': 'availability.xml'}
USERNAME = 'user'
PASSWORD = 'password'
class MockResponse:
def __init__(self, content):
self.content = content
self.text = content.decode()
self.status_code = 200
def raise_for_status(self):
return
def get_mockreturn(self, method, url, data=None, timeout=10,
files=None, params=None, headers=None, **kwargs):
log.debug("get_mockreturn url:{} params:{} kwargs:{}".format(url, params, kwargs))
if kwargs and 'auth' in kwargs:
auth = kwargs['auth']
if auth and (auth[0] != USERNAME or auth[1] != PASSWORD):
log.debug("Rejecting credentials")
return create_auth_failure_response()
if 'data/async' in str(url):
# Responses for an asynchronous SODA job
if str(url).endswith('data/async'):
self.first_job_pass = True
self.completed_job_key = "COMPLETED_JOB"
return create_soda_create_response('111-000-111-000')
elif str(url).endswith('/phase') and method == 'POST':
key = "RUN_JOB"
elif str(url).endswith('111-000-111-000/parameters') and method == 'POST':
assert "POS" in data
print(data['POS'])
pos_parts = data['POS'].split(' ')
assert len(pos_parts) == 4
self.completed_job_key = 'cutout_{}_{:.4f}_{:.4f}_{:.4f}'.format(pos_parts[0], float(pos_parts[1]),
float(pos_parts[2]), float(pos_parts[3]))
return create_soda_create_response('111-000-111-000')
elif str(url).endswith('111-000-111-000') and method == 'GET':
key = "RUN_JOB" if self.first_job_pass else self.completed_job_key
self.first_job_pass = False
else:
raise ValueError("Unexpected SODA async {} call to url {}".format(method, url))
elif 'datalink' in str(url):
if 'cube-244' in str(url):
key = 'DATALINK'
else:
key = 'DATALINK_NOACCESS'
elif str(url) == 'https://data.csiro.au/casda_vo_proxy/vo/tap/availability':
key = 'AVAILABILITY'
else:
key = params['POS'].split()[0] if params['POS'] else None
filename = data_path(DATA_FILES[key])
log.debug('providing ' + filename)
with open(filename, 'rb') as infile:
content = infile.read()
return MockResponse(content)
def create_soda_create_response(jobid):
job_url = 'https://casda.csiro.au/casda_data_access/data/async/' + jobid
create_response_headers = [
['location', job_url]
]
create_response = Mock(spec=requests.Response)
create_response.configure_mock(status_code=303, message='OK', headers=create_response_headers, url=job_url)
return create_response
def create_auth_failure_response():
unauthenticated_headers = [
['WWW-Authenticate', 'Basic realm="ATNF OPAL Login"']
]
create_response = MagicMock(spec=requests.Response)
attrs = {'raise_for_status.side_effect': requests.exceptions.HTTPError()}
create_response.configure_mock(status_code=401, message='OK', headers=unauthenticated_headers, **attrs)
return create_response
@pytest.fixture
def patch_get(request):
mp = request.getfixturevalue("monkeypatch")
mp.setattr(requests.Session, 'request', get_mockreturn)
return mp
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def isclose(value1, value2, abs_tol=1e-09):
return abs(value1 - value2) < abs_tol
def fake_login(casda, username, password):
casda.USERNAME = username
casda._auth = (username, password)
casda._authenticated = True
def test_login_no_default_user():
casda = Casda()
assert casda._authenticated is False
assert casda.USERNAME == ''
with pytest.raises(LoginError, match=r"If you do not pass a username to login\(\),"):
Casda.login()
assert casda._authenticated is False
assert casda.USERNAME == ''
assert hasattr(casda, '_auth') is False
@pytest.mark.skip('No keyring backend on the CI server')
def test_login_keyring(patch_get):
casda = Casda()
assert casda._authenticated is False
assert casda.USERNAME == ''
keyring.set_password("astroquery:casda.csiro.au", USERNAME, PASSWORD)
casda.login(username=USERNAME)
keyring.delete_password("astroquery:casda.csiro.au", USERNAME)
assert casda._authenticated is True
assert casda.USERNAME == USERNAME
assert casda._auth == (USERNAME, PASSWORD)
def test_query_region_text_radius(patch_get):
ra = 333.9092
dec = -45.8418
radius = 0.5
query_payload = Casda.query_region('22h15m38.2s -45d50m30.5s', radius=radius * u.deg, cache=False,
get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('CIRCLE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), ra, abs_tol=1e-4)
assert isclose(float(pos_parts[2]), dec, abs_tol=1e-4)
assert isclose(float(pos_parts[3]), radius)
assert len(pos_parts) == 4
responses = Casda.query_region('22h15m38.2s -45d50m30.5s', radius=0.5 * u.deg, cache=False)
assert isinstance(responses, Table)
assert len(responses) == 3
def test_query_region_radius(patch_get):
ra = 333.9092
dec = -45.8418
radius = 0.5
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region(centre, radius=radius * u.deg, cache=False, get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('CIRCLE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), ra, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), dec, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), radius)
assert len(pos_parts) == 4
responses = Casda.query_region(centre, radius=0.5 * u.deg, cache=False)
assert isinstance(responses, Table)
assert len(responses) == 3
def test_query_region_async_radius(patch_get):
ra = 333.9092
dec = -45.8418
radius = 0.5
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region_async(centre, radius=radius * u.deg, cache=False, get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('CIRCLE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), ra, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), dec, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), radius)
assert len(pos_parts) == 4
responses = Casda.query_region_async(centre, radius=0.5 * u.deg, cache=False)
assert isinstance(responses, MockResponse)
def test_query_region_box(patch_get):
ra = 333.9092
dec = -45.8418
width = 0.5
height = 0.2
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region(centre, width=width * u.deg, height=height * u.deg, cache=False,
get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('RANGE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'RANGE'
assert isclose(float(pos_parts[1]), ra - width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), ra + width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), dec - height / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[4]), dec + height / 2, abs_tol=1e-5)
assert len(pos_parts) == 5
responses = Casda.query_region(centre, width=width * u.deg, height=height * u.deg, cache=False)
assert isinstance(responses, Table)
assert len(responses) == 2
def test_query_region_async_box(patch_get):
ra = 333.9092
dec = -45.8418
width = 0.5
height = 0.2
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region_async(centre, width=width * u.deg, height=height * u.deg, cache=False,
get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('RANGE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'RANGE'
assert isclose(float(pos_parts[1]), ra - width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), ra + width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), dec - height / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[4]), dec + height / 2, abs_tol=1e-5)
assert len(pos_parts) == 5
responses = Casda.query_region_async(centre, width=width * u.deg, height=height * u.deg, cache=False)
assert isinstance(responses, MockResponse)
def test_filter_out_unreleased():
with pytest.warns(W03):
all_records = parse(data_path('partial_unreleased.xml'), verify='warn').get_first_table().to_table()
assert all_records[0]['obs_release_date'] == '2017-08-02T03:51:19.728Z'
assert all_records[1]['obs_release_date'] == '2218-01-02T16:51:00.728Z'
assert all_records[2]['obs_release_date'] == ''
assert len(all_records) == 3
# This should filter out the rows with either a future obs_release_date or no obs_release_date
filtered = Casda.filter_out_unreleased(all_records)
assert filtered[0]['obs_release_date'] == '2017-08-02T03:51:19.728Z'
assert filtered[0]['obs_publisher_did'] == 'cube-502'
assert len(filtered) == 1
def test_stage_data_unauthorised(patch_get):
table = Table()
with pytest.raises(ValueError, match=r"Credentials must be supplied"):
Casda.stage_data(table)
def test_stage_data_empty(patch_get):
table = Table()
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
urls = casda.stage_data(table)
assert urls == []
def test_stage_data_invalid_credentials(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-220']
table = Table([Column(data=access_urls, name='access_url')])
casda = Casda()
# Update the casda object to indicate that it has been authenticated
casda.USERNAME = USERNAME
casda._auth = (USERNAME, 'notthepassword')
casda._authenticated = True
with pytest.raises(requests.exceptions.HTTPError):
casda.stage_data(table)
def test_stage_data_no_link(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-240']
table = Table([Column(data=access_urls, name='access_url')])
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
casda.POLL_INTERVAL = 1
with pytest.raises(ValueError, match=r"You do not have access to any of the requested data files\."):
casda.stage_data(table)
def test_stage_data(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-244']
table = Table([Column(data=access_urls, name='access_url')])
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
casda.POLL_INTERVAL = 1
with pytest.warns(W50, match="Invalid unit string 'pixels'"):
urls = casda.stage_data(table, verbose=True)
assert urls == ['http://casda.csiro.au/download/web/111-000-111-000/askap_img.fits.checksum',
'http://casda.csiro.au/download/web/111-000-111-000/askap_img.fits']
def test_cutout(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-244']
table = Table([Column(data=access_urls, name='access_url')])
ra = 333.9092*u.deg
dec = -45.8418*u.deg
radius = 30*u.arcmin
centre = SkyCoord(ra, dec)
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
casda.POLL_INTERVAL = 1
with pytest.warns(W50, match="Invalid unit string 'pixels'"):
urls = casda.cutout(table, coordinates=centre, radius=radius, verbose=True)
assert urls == ['http://casda.csiro.au/download/web/111-000-111-000/cutout.fits.checksum',
'http://casda.csiro.au/download/web/111-000-111-000/cutout.fits']
def test_cutout_no_args(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-244']
table = Table([Column(data=access_urls, name='access_url')])
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
casda.POLL_INTERVAL = 1
with pytest.raises(ValueError,
match=r"Please provide cutout parameters such as coordinates, band or channel\."):
with pytest.warns(W50, match="Invalid unit string 'pixels'"):
casda.cutout(table)
def test_cutout_unauthorised(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-244']
table = Table([Column(data=access_urls, name='access_url')])
ra = 333.9092*u.deg
dec = -45.8418*u.deg
radius = 30*u.arcmin
centre = SkyCoord(ra, dec)
with pytest.raises(ValueError, match=r"Credentials must be supplied to download CASDA image data"):
Casda.cutout(table, coordinates=centre, radius=radius, verbose=True)
def test_cutout_no_table(patch_get):
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
casda.POLL_INTERVAL = 1
result = casda.cutout(None)
assert result == []
def test_args_to_payload_band(patch_get):
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
payload = casda._args_to_payload(band=(0.195*u.m, 0.215*u.m))
assert payload['BAND'] == '0.195 0.215'
assert list(payload.keys()) == ['BAND']
payload = casda._args_to_payload(band=(0.215*u.m, 0.195*u.m))
assert payload['BAND'] == '0.195 0.215'
assert list(payload.keys()) == ['BAND']
payload = casda._args_to_payload(band=(0.195*u.m, 21.5*u.cm))
assert payload['BAND'] == '0.195 0.215'
assert list(payload.keys()) == ['BAND']
payload = casda._args_to_payload(band=(None, 0.215*u.m))
assert payload['BAND'] == '-Inf 0.215'
assert list(payload.keys()) == ['BAND']
payload = casda._args_to_payload(band=(0.195*u.m, None))
assert payload['BAND'] == '0.195 +Inf'
assert list(payload.keys()) == ['BAND']
payload = casda._args_to_payload(band=(1.42*u.GHz, 1.5*u.GHz))
assert payload['BAND'] == '0.19986163866666667 0.21112144929577467'
assert list(payload.keys()) == ['BAND']
payload = casda._args_to_payload(band=np.array([1.5, 1.42])*u.GHz)
assert payload['BAND'] == '0.19986163866666667 0.21112144929577467'
assert list(payload.keys()) == ['BAND']
payload = casda._args_to_payload(band=(None, 1.5*u.GHz))
assert payload['BAND'] == '0.19986163866666667 +Inf'
assert list(payload.keys()) == ['BAND']
payload = casda._args_to_payload(band=(1.42*u.GHz, None))
assert payload['BAND'] == '-Inf 0.21112144929577467'
assert list(payload.keys()) == ['BAND']
def test_args_to_payload_band_invalid(patch_get):
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
with pytest.raises(ValueError) as excinfo:
casda._args_to_payload(band='foo')
assert "The 'band' value must be a list of 2 wavelength or frequency values." in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
casda._args_to_payload(band=(0.195*u.m, 0.215*u.m, 0.3*u.m))
assert "The 'band' value must be a list of 2 wavelength or frequency values." in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
casda._args_to_payload(band=('a', 0.215*u.m))
assert "The 'band' value must be a list of 2 wavelength or frequency values." in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
casda._args_to_payload(band=(1.42*u.GHz, 21*u.cm))
assert "The 'band' values must have the same kind of units." in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
casda._args_to_payload(band=[1.42*u.radian, 21*u.deg])
assert "The 'band' values must be wavelengths or frequencies." in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
casda._args_to_payload(band=(1.42*u.GHz, 1.5*u.GHz), channel=(5, 10))
assert "Either 'channel' or 'band' values may be provided but not both." in str(excinfo.value)
def test_args_to_payload_channel(patch_get):
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
payload = casda._args_to_payload(channel=(0, 30))
assert payload['CHANNEL'] == '0 30'
assert list(payload.keys()) == ['CHANNEL']
payload = casda._args_to_payload(channel=np.array([17, 23]))
assert payload['CHANNEL'] == '17 23'
assert list(payload.keys()) == ['CHANNEL']
payload = casda._args_to_payload(channel=(23, 17))
assert payload['CHANNEL'] == '17 23'
assert list(payload.keys()) == ['CHANNEL']
def test_args_to_payload_channel_invalid(patch_get):
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
with pytest.raises(ValueError) as excinfo:
casda._args_to_payload(channel='one')
assert "The 'channel' value must be a list of 2 integer values." in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
casda._args_to_payload(channel=(1.42*u.GHz, 1.5*u.GHz))
assert "The 'channel' value must be a list of 2 integer values." in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
casda._args_to_payload(channel=(None, 5))
assert "The 'channel' value must be a list of 2 integer values." in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
casda._args_to_payload(channel=(5))
assert "The 'channel' value must be a list of 2 integer values." in str(excinfo.value)
def test_args_to_payload_coordinates(patch_get):
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
cutout_coords = SkyCoord(ra=345.245*u.degree, dec=-32.125*u.degree, frame='icrs')
payload = casda._args_to_payload(coordinates=cutout_coords)
assert payload['POS'].startswith('CIRCLE 345')
pos_parts = payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), 345.245, abs_tol=1e-4)
assert isclose(float(pos_parts[2]), -32.125, abs_tol=1e-4)
assert isclose(float(pos_parts[3]), 1/60)
assert len(pos_parts) == 4
assert list(payload.keys()) == ['POS']
cutout_coords = SkyCoord(ra=187.5*u.degree, dec=-60.0*u.degree, frame='icrs')
payload = casda._args_to_payload(coordinates=cutout_coords, radius=900*u.arcsec)
assert payload['POS'].startswith('CIRCLE 187')
pos_parts = payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), 187.5, abs_tol=1e-4)
assert isclose(float(pos_parts[2]), -60.0, abs_tol=1e-4)
assert isclose(float(pos_parts[3]), 0.25)
assert len(pos_parts) == 4
assert list(payload.keys()) == ['POS']
cutout_coords = SkyCoord(ra=187.5*u.degree, dec=-60.0*u.degree, frame='icrs')
payload = casda._args_to_payload(coordinates=cutout_coords, width=2*u.arcmin, height=3*u.arcmin)
assert payload['POS'].startswith('RANGE 187')
pos_parts = payload['POS'].split(' ')
assert pos_parts[0] == 'RANGE'
assert isclose(float(pos_parts[1]), 187.5-1/60, abs_tol=1e-4)
assert isclose(float(pos_parts[2]), 187.5+1/60, abs_tol=1e-4)
assert isclose(float(pos_parts[3]), -60.0-1.5/60, abs_tol=1e-4)
assert isclose(float(pos_parts[4]), -60.0+1.5/60, abs_tol=1e-4)
assert len(pos_parts) == 5
assert list(payload.keys()) == ['POS']
def test_args_to_payload_combined(patch_get):
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
cutout_coords = SkyCoord(ra=187.5*u.degree, dec=-60.0*u.degree, frame='icrs')
payload = casda._args_to_payload(coordinates=cutout_coords, channel=(17, 23))
assert payload['POS'].startswith('CIRCLE 187')
pos_parts = payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), 187.5, abs_tol=1e-4)
assert isclose(float(pos_parts[2]), -60.0, abs_tol=1e-4)
assert isclose(float(pos_parts[3]), 1/60)
assert len(pos_parts) == 4
assert payload['CHANNEL'] == '17 23'
assert set(payload.keys()) == set(['CHANNEL', 'POS'])
def test_download_file(patch_get):
urls = ['https://ingest.pawsey.org/bucket_name/path/askap_img.fits?security=stuff',
'http://casda.csiro.au/download/web/111-000-111-000/askap_img.fits.checksum',
'https://ingest.pawsey.org.au/casda-prd-as110-01/dc52217/primary_images/RACS-DR1_0000%2B18A.fits?security=stuff']
casda = Casda()
fake_login(casda, USERNAME, PASSWORD)
# skip the actual downloading of the file
download_mock = MagicMock()
casda._download_file = download_mock
filenames = casda.download_files(urls)
assert filenames[0].endswith('askap_img.fits')
assert filenames[1].endswith('askap_img.fits.checksum')
assert filenames[2].endswith('RACS-DR1_0000+18A.fits')
| {
"content_hash": "cfbab6d25687aab11237ab541135a9ec",
"timestamp": "",
"source": "github",
"line_count": 569,
"max_line_length": 125,
"avg_line_length": 38.81722319859402,
"alnum_prop": 0.6533254855797528,
"repo_name": "imbasimba/astroquery",
"id": "8a34fafe62ad259769ad9db97c68552cc910fbd3",
"size": "22176",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astroquery/casda/tests/test_casda.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "493404"
},
{
"name": "Python",
"bytes": "2852847"
}
],
"symlink_target": ""
} |
from equations import *
from texttree import TextTree
def find_best_attribute_to_split_on(training_set):
"""Which attribute has the highest information gain?"""
best_attr = None
best_gain = None
for attr in [key for key in training_set[0].keys() if key != 'classes']:
information_gain = I(training_set, attr)
if best_gain is None or information_gain > best_gain:
best_gain = information_gain
best_attr = attr
return best_attr
def unique_values(training_set, attribute):
"""Return all the different values that the given attribute has in the given training data"""
values = [x[attribute] for x in training_set]
return dict([(a, '') for a in values]).keys()
def branch_on_attribute(training_set, attr, value):
# Only include cases where attribute has the given value
training_set = [c for c in training_set if c[attr] == value]
# Remove the attribute from the cases
training_set = [dict([(a,v) for a,v in x.items() if a != attr]) for x in training_set]
return training_set
def stopping_criterion_met(training_set):
    """Stop splitting once every remaining case belongs to the same class."""
    # For each case, collect the classes that have at least one member, then
    # flatten and deduplicate; a single surviving class means splitting can stop.
    class_memberships = [x['classes'] for x in training_set]
    classes_with_members = [[k for k, v in single.items() if v > 0] for single in class_memberships]
    flattened = reduce(lambda a, b: a + b, classes_with_members)
    uniqued = list(set(flattened))
    return len(uniqued) == 1
def make_decision_tree(training_set):
# The tree is the attribute to split on and subtrees have the different values of the attribute, under which are the next splits.
#
# Attribute
# __|__
# | |
# Value Value
# | |
# | |
# next next
# attr attr
# tree tree
#
attr = find_best_attribute_to_split_on(training_set)
if stopping_criterion_met(training_set) or attr is None:
# If there are no more attributes to split on, pick class based on majority.
likeliest_class = sorted(training_set[0]['classes'].items(), cmp=lambda a,b:b[1]-a[1])[0][0]
return (likeliest_class,)
# Nodes are all the possible values of the picked attribute, sorted to alphabetical order.
values = sorted(unique_values(training_set, attr))
child_nodes = tuple([(value,make_decision_tree(branch_on_attribute(training_set, attr, value))) for value in values])
return (attr,) + child_nodes
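# Hypothetical usage sketch (not part of the original module; the case format
# is inferred from the functions above): each case is a dict of attribute
# values plus a 'classes' dict of per-class membership counts. Requires the
# I() helper from the equations module imported at the top.
if __name__ == "__main__":
    example_cases = [
        {'outlook': 'sunny', 'windy': 'no', 'classes': {'play': 1, 'stay': 0}},
        {'outlook': 'sunny', 'windy': 'yes', 'classes': {'play': 0, 'stay': 1}},
        {'outlook': 'rainy', 'windy': 'no', 'classes': {'play': 0, 'stay': 1}},
    ]
    print(make_decision_tree(example_cases))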
| {
"content_hash": "0f1fe6552b626b533da2d4c1e616c082",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 130,
"avg_line_length": 34.19230769230769,
"alnum_prop": 0.6940382452193475,
"repo_name": "Bemmu/DecisionTree",
"id": "8418a29e45d6b2cea0817696acaad05ffdfd4e16",
"size": "2775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "decisiontree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15542"
}
],
"symlink_target": ""
} |
import os
import unittest
import numpy as np
from pymatgen import Structure
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinit import ETSF_Reader
try:
import netCDF4
except ImportError:
netCDF4 = None
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinit")
def ref_file(filename):
return os.path.join(_test_dir, filename)
class ETSF_Reader_TestCase(PymatgenTest):
def setUp(self):
formulas = ["Si2",]
self.GSR_paths = d = {}
for formula in formulas:
d[formula] = ref_file(formula + "_GSR.nc")
@unittest.skipIf(netCDF4 is None, "Requires Netcdf4")
def test_read_Si2(self):
path = self.GSR_paths["Si2"]
ref_dims = {
"number_of_spins": 1
}
ref_int_values = {
"space_group": 227,
"number_of_states": np.reshape([15, 15], (1,2)),
}
ref_float_values = {
"etotal": -8.85911566912484,
"primitive_vectors": np.reshape([0, 5.125, 5.125, 5.125, 0, 5.125,
5.125, 5.125, 0], (3,3)),
}
with ETSF_Reader(path) as data:
self.assertEqual(data.ngroups, 1)
print(data.read_varnames())
# Test dimensions.
for dimname, int_ref in ref_dims.items():
value = data.read_dimvalue(dimname)
self.assertArrayEqual(value, int_ref)
# Test int variables
for varname, int_ref in ref_int_values.items():
value = data.read_value(varname)
print(varname, value)
self.assertArrayEqual(value, int_ref)
# Test float variables
for varname, float_ref in ref_float_values.items():
value = data.read_value(varname)
print(varname, value)
self.assertArrayAlmostEqual(value, float_ref)
#assert 0
# Reading non-existent variables or dims should raise
# a subclass of NetcdReaderError
with self.assertRaises(data.Error):
data.read_value("foobar")
with self.assertRaises(data.Error):
data.read_dimvalue("foobar")
# Unless default is given
assert data.read_value("foobar", default=None) is None
data.print_tree()
for group in data.walk_tree():
print("group: " + str(group))
# Initialize pymatgen structure from GSR.
structure = data.read_structure()
self.assertTrue(isinstance(structure, Structure))
# Read ixc.
# TODO: Upgrade GSR file.
#xc = data.read_abinit_xcfunc()
#assert xc == "LDA"
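# Added for convenience (not in the original file): allow running this test
# module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()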
| {
"content_hash": "5668e9eff1a7b06a89b0d10b713093ec",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 78,
"avg_line_length": 30.56989247311828,
"alnum_prop": 0.5392191347168483,
"repo_name": "blondegeek/pymatgen",
"id": "ca0d37639cad58c402f7c1aa2ac8eb2fa3341356",
"size": "2953",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pymatgen/io/abinit/tests/test_netcdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Dockerfile",
"bytes": "275"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "15152267"
},
{
"name": "Python",
"bytes": "7756735"
},
{
"name": "Roff",
"bytes": "1898220"
}
],
"symlink_target": ""
} |
from django.db import models
class Student(models.Model):
"""Student Model"""
class Meta(object):
verbose_name = "Студент"
verbose_name_plural = "Студенти"
first_name = models.CharField(
max_length=256,
blank=False,
verbose_name='Ім’я')
last_name = models.CharField(
max_length=256,
blank=False,
verbose_name='Прізвище')
middle_name = models.CharField(
max_length=256,
blank=True,
verbose_name='По-батькові',
default='')
birthday = models.DateField(
blank=False,
verbose_name='Дата народження',
null=True)
photo = models.ImageField(
blank=True,
verbose_name='Фото',
null=True)
ticket = models.CharField(
max_length=256,
blank=False,
verbose_name='Білет')
student_group = models.ForeignKey(
'Group',
verbose_name='Група',
blank=False,
null=True,
on_delete=models.PROTECT)
notes = models.TextField(
blank=True,
verbose_name='Додаткові нотатки')
def __str__(self):
return "%s %s" % (self.last_name, self.first_name)
| {
"content_hash": "c11921878116ef1c463b2c364d35066a",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 58,
"avg_line_length": 22.24074074074074,
"alnum_prop": 0.5661948376353039,
"repo_name": "samitnuk/studentsdb",
"id": "511d28c4942ac48428fc368ec58c20b6dda21ccb",
"size": "1283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "students/models/students.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "730"
},
{
"name": "HTML",
"bytes": "25077"
},
{
"name": "JavaScript",
"bytes": "9416"
},
{
"name": "Python",
"bytes": "42313"
}
],
"symlink_target": ""
} |
"""Configurations for Encoders."""
from typing import Optional
import dataclasses
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
@dataclasses.dataclass
class SmallEncoderConfig(base_config.Config):
"""Encoder config for the lowcost stage."""
net2net_ratio: float = 0.25
net2net_layers: Optional[int] = None
fcfact_ratio: float = 0.2
fcfact_layers: Optional[int] = None
kq_ratio: Optional[float] = None
lightatt_layers: Optional[int] = None
input_pool_name: Optional[str] = None
input_pool_size: Optional[int] = None
override_num_layers: Optional[int] = None
@dataclasses.dataclass
class TransformerEncoderConfig(base_config.Config):
"""BERT encoder configuration."""
vocab_size: int = 30522
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = 'gelu'
intermediate_size: int = 3072
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
embedding_size: Optional[int] = None
# Small model configuration
net2net_ratio: float = 0.25
net2net_layers: Optional[int] = None
fcfact_ratio: float = 0.2
fcfact_layers: Optional[int] = None
kq_ratio: Optional[float] = None
lightatt_layers: Optional[int] = None
input_pool_name: Optional[str] = None
input_pool_size: Optional[int] = None
def from_bert_encoder_config(official_cfg, small_cfg):
"""Create the encoder from a config object."""
if isinstance(official_cfg, encoders.BertEncoderConfig):
official_cfg = official_cfg.as_dict()
if isinstance(small_cfg, SmallEncoderConfig):
small_cfg = small_cfg.as_dict()
num_layers = official_cfg['num_layers']
if small_cfg['override_num_layers'] is not None:
num_layers = small_cfg['override_num_layers']
assert small_cfg['fcfact_layers'] is None or small_cfg[
'net2net_layers'] is None
return TransformerEncoderConfig(
vocab_size=official_cfg['vocab_size'],
hidden_size=official_cfg['hidden_size'],
num_layers=num_layers,
num_attention_heads=official_cfg['num_attention_heads'],
hidden_activation=official_cfg['hidden_activation'],
intermediate_size=official_cfg['intermediate_size'],
dropout_rate=official_cfg['dropout_rate'],
attention_dropout_rate=official_cfg['attention_dropout_rate'],
max_position_embeddings=official_cfg['max_position_embeddings'],
type_vocab_size=official_cfg['type_vocab_size'],
initializer_range=official_cfg['initializer_range'],
embedding_size=official_cfg['embedding_size'],
net2net_ratio=small_cfg['net2net_ratio'],
net2net_layers=small_cfg['net2net_layers'],
fcfact_ratio=small_cfg['fcfact_ratio'],
fcfact_layers=small_cfg['fcfact_layers'],
kq_ratio=small_cfg['kq_ratio'],
lightatt_layers=small_cfg['lightatt_layers'],
input_pool_name=small_cfg['input_pool_name'],
input_pool_size=small_cfg['input_pool_size'],
)
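# Hypothetical usage sketch (added for illustration; the field values are made
# up): derive the small-stage encoder config from a standard BERT encoder
# config plus a SmallEncoderConfig override.
if __name__ == '__main__':
  bert_cfg = encoders.BertEncoderConfig(num_layers=12, hidden_size=768)
  small_cfg = SmallEncoderConfig(net2net_layers=6, override_num_layers=6)
  print(from_bert_encoder_config(bert_cfg, small_cfg))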
| {
"content_hash": "c84b7bc903d22d03aaff4f51464cab46",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 70,
"avg_line_length": 36.71084337349398,
"alnum_prop": 0.7095503774204135,
"repo_name": "google-research/google-research",
"id": "35dfd75fda2e88360e1359038448c3bdf6e90c52",
"size": "3655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grow_bert/lowcost/config/config_encoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import logging
import random
from mopidy import exceptions
from mopidy.core import listener
from mopidy.internal import deprecation, validation
from mopidy.models import TlTrack, Track
logger = logging.getLogger(__name__)
class TracklistController(object):
pykka_traversable = True
def __init__(self, core):
self.core = core
self._next_tlid = 1
self._tl_tracks = []
self._version = 0
self._shuffled = []
# Properties
def get_tl_tracks(self):
"""Get tracklist as list of :class:`mopidy.models.TlTrack`."""
return self._tl_tracks[:]
tl_tracks = deprecation.deprecated_property(get_tl_tracks)
"""
.. deprecated:: 1.0
Use :meth:`get_tl_tracks` instead.
"""
def get_tracks(self):
"""Get tracklist as list of :class:`mopidy.models.Track`."""
return [tl_track.track for tl_track in self._tl_tracks]
tracks = deprecation.deprecated_property(get_tracks)
"""
.. deprecated:: 1.0
Use :meth:`get_tracks` instead.
"""
def get_length(self):
"""Get length of the tracklist."""
return len(self._tl_tracks)
length = deprecation.deprecated_property(get_length)
"""
.. deprecated:: 1.0
Use :meth:`get_length` instead.
"""
def get_version(self):
"""
Get the tracklist version.
Integer which is increased every time the tracklist is changed. Is not
reset before Mopidy is restarted.
"""
return self._version
def _increase_version(self):
self._version += 1
self.core.playback._on_tracklist_change()
self._trigger_tracklist_changed()
version = deprecation.deprecated_property(get_version)
"""
.. deprecated:: 1.0
Use :meth:`get_version` instead.
"""
def get_consume(self):
"""Get consume mode.
:class:`True`
Tracks are removed from the tracklist when they have been played.
:class:`False`
Tracks are not removed from the tracklist.
"""
return getattr(self, '_consume', False)
def set_consume(self, value):
"""Set consume mode.
:class:`True`
Tracks are removed from the tracklist when they have been played.
:class:`False`
Tracks are not removed from the tracklist.
"""
validation.check_boolean(value)
if self.get_consume() != value:
self._trigger_options_changed()
return setattr(self, '_consume', value)
consume = deprecation.deprecated_property(get_consume, set_consume)
"""
.. deprecated:: 1.0
Use :meth:`get_consume` and :meth:`set_consume` instead.
"""
def get_random(self):
"""Get random mode.
:class:`True`
Tracks are selected at random from the tracklist.
:class:`False`
Tracks are played in the order of the tracklist.
"""
return getattr(self, '_random', False)
def set_random(self, value):
"""Set random mode.
:class:`True`
Tracks are selected at random from the tracklist.
:class:`False`
Tracks are played in the order of the tracklist.
"""
validation.check_boolean(value)
if self.get_random() != value:
self._trigger_options_changed()
if value:
self._shuffled = self.get_tl_tracks()
random.shuffle(self._shuffled)
return setattr(self, '_random', value)
random = deprecation.deprecated_property(get_random, set_random)
"""
.. deprecated:: 1.0
Use :meth:`get_random` and :meth:`set_random` instead.
"""
def get_repeat(self):
"""
Get repeat mode.
:class:`True`
The tracklist is played repeatedly.
:class:`False`
The tracklist is played once.
"""
return getattr(self, '_repeat', False)
def set_repeat(self, value):
"""
Set repeat mode.
To repeat a single track, set both ``repeat`` and ``single``.
:class:`True`
The tracklist is played repeatedly.
:class:`False`
The tracklist is played once.
"""
validation.check_boolean(value)
if self.get_repeat() != value:
self._trigger_options_changed()
return setattr(self, '_repeat', value)
repeat = deprecation.deprecated_property(get_repeat, set_repeat)
"""
.. deprecated:: 1.0
Use :meth:`get_repeat` and :meth:`set_repeat` instead.
"""
def get_single(self):
"""
Get single mode.
:class:`True`
Playback is stopped after current song, unless in ``repeat`` mode.
:class:`False`
Playback continues after current song.
"""
return getattr(self, '_single', False)
def set_single(self, value):
"""
Set single mode.
:class:`True`
Playback is stopped after current song, unless in ``repeat`` mode.
:class:`False`
Playback continues after current song.
"""
validation.check_boolean(value)
if self.get_single() != value:
self._trigger_options_changed()
return setattr(self, '_single', value)
single = deprecation.deprecated_property(get_single, set_single)
"""
.. deprecated:: 1.0
Use :meth:`get_single` and :meth:`set_single` instead.
"""
# Methods
def index(self, tl_track=None, tlid=None):
"""
The position of the given track in the tracklist.
        If neither *tl_track* nor *tlid* is given we return the index of
the currently playing track.
:param tl_track: the track to find the index of
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:param tlid: TLID of the track to find the index of
:type tlid: :class:`int` or :class:`None`
:rtype: :class:`int` or :class:`None`
.. versionadded:: 1.1
The *tlid* parameter
"""
tl_track is None or validation.check_instance(tl_track, TlTrack)
tlid is None or validation.check_integer(tlid, min=1)
if tl_track is None and tlid is None:
tl_track = self.core.playback.get_current_tl_track()
if tl_track is not None:
try:
return self._tl_tracks.index(tl_track)
except ValueError:
pass
elif tlid is not None:
for i, tl_track in enumerate(self._tl_tracks):
if tl_track.tlid == tlid:
return i
return None
def get_eot_tlid(self):
"""
The TLID of the track that will be played after the current track.
Not necessarily the same TLID as returned by :meth:`get_next_tlid`.
:rtype: :class:`int` or :class:`None`
.. versionadded:: 1.1
"""
current_tl_track = self.core.playback.get_current_tl_track()
return getattr(self.eot_track(current_tl_track), 'tlid', None)
def eot_track(self, tl_track):
"""
The track that will be played after the given track.
Not necessarily the same track as :meth:`next_track`.
:param tl_track: the reference track
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:rtype: :class:`mopidy.models.TlTrack` or :class:`None`
"""
deprecation.warn('core.tracklist.eot_track', pending=True)
tl_track is None or validation.check_instance(tl_track, TlTrack)
if self.get_single() and self.get_repeat():
return tl_track
elif self.get_single():
return None
        # The current difference between next and EOT handling is that EOT needs
        # to handle "single"; with that out of the way, the rest of the logic is
        # shared.
return self.next_track(tl_track)
def get_next_tlid(self):
"""
The tlid of the track that will be played if calling
:meth:`mopidy.core.PlaybackController.next()`.
For normal playback this is the next track in the tracklist. If repeat
is enabled the next track can loop around the tracklist. When random is
        enabled this should be a random track; all tracks should be played once
before the tracklist repeats.
:rtype: :class:`int` or :class:`None`
.. versionadded:: 1.1
"""
current_tl_track = self.core.playback.get_current_tl_track()
return getattr(self.next_track(current_tl_track), 'tlid', None)
def next_track(self, tl_track):
"""
The track that will be played if calling
:meth:`mopidy.core.PlaybackController.next()`.
For normal playback this is the next track in the tracklist. If repeat
is enabled the next track can loop around the tracklist. When random is
        enabled this should be a random track; all tracks should be played once
before the tracklist repeats.
:param tl_track: the reference track
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:rtype: :class:`mopidy.models.TlTrack` or :class:`None`
"""
deprecation.warn('core.tracklist.next_track', pending=True)
tl_track is None or validation.check_instance(tl_track, TlTrack)
if not self._tl_tracks:
return None
if self.get_random() and not self._shuffled:
if self.get_repeat() or not tl_track:
logger.debug('Shuffling tracks')
self._shuffled = self._tl_tracks[:]
random.shuffle(self._shuffled)
if self.get_random():
if self._shuffled:
return self._shuffled[0]
return None
next_index = self.index(tl_track)
if next_index is None:
next_index = 0
else:
next_index += 1
if self.get_repeat():
next_index %= len(self._tl_tracks)
elif next_index >= len(self._tl_tracks):
return None
return self._tl_tracks[next_index]
def get_previous_tlid(self):
"""
Returns the TLID of the track that will be played if calling
:meth:`mopidy.core.PlaybackController.previous()`.
For normal playback this is the previous track in the tracklist. If
random and/or consume is enabled it should return the current track
instead.
:rtype: :class:`int` or :class:`None`
.. versionadded:: 1.1
"""
current_tl_track = self.core.playback.get_current_tl_track()
return getattr(self.previous_track(current_tl_track), 'tlid', None)
def previous_track(self, tl_track):
"""
Returns the track that will be played if calling
:meth:`mopidy.core.PlaybackController.previous()`.
For normal playback this is the previous track in the tracklist. If
random and/or consume is enabled it should return the current track
instead.
:param tl_track: the reference track
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:rtype: :class:`mopidy.models.TlTrack` or :class:`None`
"""
deprecation.warn('core.tracklist.previous_track', pending=True)
tl_track is None or validation.check_instance(tl_track, TlTrack)
if self.get_repeat() or self.get_consume() or self.get_random():
return tl_track
position = self.index(tl_track)
if position in (None, 0):
return None
        # Since we know we are not at zero, we have to be somewhere in the range
        # 1 - len(tracks). Thus 'position - 1' will always be within the list.
return self._tl_tracks[position - 1]
def add(self, tracks=None, at_position=None, uri=None, uris=None):
"""
Add tracks to the tracklist.
If ``uri`` is given instead of ``tracks``, the URI is looked up in the
library and the resulting tracks are added to the tracklist.
If ``uris`` is given instead of ``uri`` or ``tracks``, the URIs are
looked up in the library and the resulting tracks are added to the
tracklist.
If ``at_position`` is given, the tracks are inserted at the given
position in the tracklist. If ``at_position`` is not given, the tracks
are appended to the end of the tracklist.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
:param tracks: tracks to add
:type tracks: list of :class:`mopidy.models.Track` or :class:`None`
:param at_position: position in tracklist to add tracks
:type at_position: int or :class:`None`
:param uri: URI for tracks to add
:type uri: string or :class:`None`
:param uris: list of URIs for tracks to add
:type uris: list of string or :class:`None`
:rtype: list of :class:`mopidy.models.TlTrack`
.. versionadded:: 1.0
The ``uris`` argument.
.. deprecated:: 1.0
The ``tracks`` and ``uri`` arguments. Use ``uris``.
"""
if sum(o is not None for o in [tracks, uri, uris]) != 1:
raise ValueError(
'Exactly one of "tracks", "uri" or "uris" must be set')
tracks is None or validation.check_instances(tracks, Track)
uri is None or validation.check_uri(uri)
uris is None or validation.check_uris(uris)
validation.check_integer(at_position or 0)
if tracks:
deprecation.warn('core.tracklist.add:tracks_arg')
if uri:
deprecation.warn('core.tracklist.add:uri_arg')
if tracks is None:
if uri is not None:
uris = [uri]
tracks = []
track_map = self.core.library.lookup(uris=uris)
for uri in uris:
tracks.extend(track_map[uri])
tl_tracks = []
max_length = self.core._config['core']['max_tracklist_length']
for track in tracks:
if self.get_length() >= max_length:
raise exceptions.TracklistFull(
'Tracklist may contain at most %d tracks.' % max_length)
tl_track = TlTrack(self._next_tlid, track)
self._next_tlid += 1
if at_position is not None:
self._tl_tracks.insert(at_position, tl_track)
at_position += 1
else:
self._tl_tracks.append(tl_track)
tl_tracks.append(tl_track)
if tl_tracks:
self._increase_version()
return tl_tracks
def clear(self):
"""
Clear the tracklist.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
"""
self._tl_tracks = []
self._increase_version()
def filter(self, criteria=None, **kwargs):
"""
        Filter the tracklist by the given criteria.
        Each criterion consists of a model field to check and a list of values
        to compare it against. If the model field matches one of the values, it
        may be returned.
        Only tracks that match all the given criteria are returned.
Examples::
# Returns tracks with TLIDs 1, 2, 3, or 4 (tracklist ID)
filter({'tlid': [1, 2, 3, 4]})
            # Returns tracks with URI 'xyz' or 'abc'
            filter({'uri': ['xyz', 'abc']})
            # Returns tracks with a matching TLID (1, 3 or 6) and a
            # matching URI ('xyz' or 'abc')
filter({'tlid': [1, 3, 6], 'uri': ['xyz', 'abc']})
        :param criteria: one or more criteria to match by
        :type criteria: dict of (string, list) pairs
:rtype: list of :class:`mopidy.models.TlTrack`
.. deprecated:: 1.1
Providing the criteria via ``kwargs``.
"""
if kwargs:
deprecation.warn('core.tracklist.filter:kwargs_criteria')
criteria = criteria or kwargs
tlids = criteria.pop('tlid', [])
validation.check_query(criteria, validation.TRACKLIST_FIELDS)
validation.check_instances(tlids, int)
matches = self._tl_tracks
for (key, values) in criteria.items():
matches = [
ct for ct in matches if getattr(ct.track, key) in values]
if tlids:
matches = [ct for ct in matches if ct.tlid in tlids]
return matches
def move(self, start, end, to_position):
"""
Move the tracks in the slice ``[start:end]`` to ``to_position``.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
:param start: position of first track to move
:type start: int
:param end: position after last track to move
:type end: int
:param to_position: new position for the tracks
:type to_position: int
"""
if start == end:
end += 1
tl_tracks = self._tl_tracks
# TODO: use validation helpers?
assert start < end, 'start must be smaller than end'
assert start >= 0, 'start must be at least zero'
assert end <= len(tl_tracks), \
'end can not be larger than tracklist length'
assert to_position >= 0, 'to_position must be at least zero'
assert to_position <= len(tl_tracks), \
'to_position can not be larger than tracklist length'
new_tl_tracks = tl_tracks[:start] + tl_tracks[end:]
for tl_track in tl_tracks[start:end]:
new_tl_tracks.insert(to_position, tl_track)
to_position += 1
self._tl_tracks = new_tl_tracks
self._increase_version()
def remove(self, criteria=None, **kwargs):
"""
Remove the matching tracks from the tracklist.
Uses :meth:`filter()` to lookup the tracks to remove.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
        :param criteria: one or more criteria to match by
:type criteria: dict
:rtype: list of :class:`mopidy.models.TlTrack` that was removed
.. deprecated:: 1.1
Providing the criteria via ``kwargs``.
"""
if kwargs:
deprecation.warn('core.tracklist.remove:kwargs_criteria')
tl_tracks = self.filter(criteria or kwargs)
for tl_track in tl_tracks:
position = self._tl_tracks.index(tl_track)
del self._tl_tracks[position]
self._increase_version()
return tl_tracks
def shuffle(self, start=None, end=None):
"""
        Shuffles the entire tracklist. If ``start`` and ``end`` are given, only
        the slice ``[start:end]`` is shuffled.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
:param start: position of first track to shuffle
:type start: int or :class:`None`
:param end: position after last track to shuffle
:type end: int or :class:`None`
"""
tl_tracks = self._tl_tracks
        # TODO: use validation helpers?
if start is not None and end is not None:
assert start < end, 'start must be smaller than end'
if start is not None:
assert start >= 0, 'start must be at least zero'
if end is not None:
assert end <= len(tl_tracks), 'end can not be larger than ' + \
'tracklist length'
before = tl_tracks[:start or 0]
shuffled = tl_tracks[start:end]
after = tl_tracks[end or len(tl_tracks):]
random.shuffle(shuffled)
self._tl_tracks = before + shuffled + after
self._increase_version()
def slice(self, start, end):
"""
Returns a slice of the tracklist, limited by the given start and end
positions.
:param start: position of first track to include in slice
:type start: int
:param end: position after last track to include in slice
:type end: int
:rtype: :class:`mopidy.models.TlTrack`
"""
# TODO: validate slice?
return self._tl_tracks[start:end]
def _mark_playing(self, tl_track):
"""Internal method for :class:`mopidy.core.PlaybackController`."""
if self.get_random() and tl_track in self._shuffled:
self._shuffled.remove(tl_track)
def _mark_unplayable(self, tl_track):
"""Internal method for :class:`mopidy.core.PlaybackController`."""
logger.warning('Track is not playable: %s', tl_track.track.uri)
if self.get_random() and tl_track in self._shuffled:
self._shuffled.remove(tl_track)
def _mark_played(self, tl_track):
"""Internal method for :class:`mopidy.core.PlaybackController`."""
if self.consume and tl_track is not None:
self.remove({'tlid': [tl_track.tlid]})
return True
return False
def _trigger_tracklist_changed(self):
if self.get_random():
self._shuffled = self._tl_tracks[:]
random.shuffle(self._shuffled)
else:
self._shuffled = []
logger.debug('Triggering event: tracklist_changed()')
listener.CoreListener.send('tracklist_changed')
def _trigger_options_changed(self):
logger.debug('Triggering options changed event')
listener.CoreListener.send('options_changed')
| {
"content_hash": "4dbd9c050596bde63db838c0755eb046",
"timestamp": "",
"source": "github",
"line_count": 646,
"max_line_length": 79,
"avg_line_length": 33.36532507739938,
"alnum_prop": 0.5877795304815812,
"repo_name": "quartz55/mopidy",
"id": "02508c97f3e7b17ed98a91fcb7820c2860bd23fb",
"size": "21554",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "mopidy/core/tracklist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "610"
},
{
"name": "Groff",
"bytes": "573"
},
{
"name": "HTML",
"bytes": "805"
},
{
"name": "JavaScript",
"bytes": "82060"
},
{
"name": "Python",
"bytes": "1153759"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
} |
import logging, coloredlogs
import re
from modules.config import *
from modules.database import Database
from modules.api import API
from websocket import create_connection
from urllib.parse import urlparse, parse_qs
database = Database(db_host, db_user, db_pass, db_name, db_autocommit)
database.database_connection()
class SongRequest:
CommandMain = ''
CommandMainOptions = ['songrequest', 'sr']
CommandResponses = []
def __init__(self, user, request):
self.user = user
self.request = request
self.api = API(1)
def execute_command(self, command):
from modules.bot import bot_msg
print("Link:", self.request)
youtube_id = self.get_link_id(self.request)
print("ID:", youtube_id)
# if(database.db_add_song_request(youtube_id, self.user)):
response = self.get_song_request(youtube_id)
bot_msg(response)
# send to db:
# user, id, timestamp, position (get this on insert)
# test stuff
# ws = create_connection("ws://localhost:3001", subprotocols=["echo-protocol"])
# print("Sending 'Hello world!'")
# ws.send("Hello, world!")
def get_song_request(self, id):
data = self.api.getJSON('https://www.youtube.com/oembed?url=http://www.youtube.com/watch?v={}&format=json'.format(id))
song_title = data['title']
# return "{} added {} to the playlist VaultBoy".format(self.user, song_title)
return "{} was added to the playlist (requested by {}) VaultBoy".format(song_title, self.user)
def get_link_id(self, url):
""" Credit to https://gist.github.com/kmonsoor/2a1afba4ee127cce50a0
Examples of URLs:
Valid:
'http://youtu.be/_lOT2p_FCvA',
'www.youtube.com/watch?v=_lOT2p_FCvA&feature=feedu',
'http://www.youtube.com/embed/_lOT2p_FCvA',
'http://www.youtube.com/v/_lOT2p_FCvA?version=3&hl=en_US',
'https://www.youtube.com/watch?v=rTHlyTphWP0&index=6&list=PLjeDyYvG6-40qawYNR4juzvSOg-ezZ2a6',
'youtube.com/watch?v=_lOT2p_FCvA',
Invalid:
'youtu.be/watch?v=_lOT2p_FCvA', """
if url.startswith(('youtu', 'www')):
url = 'http://' + url
query = urlparse(url)
if 'youtube' in query.hostname:
if query.path == '/watch':
return parse_qs(query.query)['v'][0]
if query.path.startswith(('/embed', '/v')):
return query.path.split('/')[2]
if 'youtu.be' in query.hostname:
return query.path[1:]
else:
raise ValueError | {
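# Illustrative expectations for get_link_id() (added as documentation, not
# executed; the URLs come from the docstring examples above):
#
#     get_link_id('http://youtu.be/_lOT2p_FCvA')                       -> '_lOT2p_FCvA'
#     get_link_id('www.youtube.com/watch?v=_lOT2p_FCvA&feature=feedu')  -> '_lOT2p_FCvA'
#     get_link_id('http://www.youtube.com/embed/_lOT2p_FCvA')           -> '_lOT2p_FCvA'
#
# URLs on any other host raise ValueError, while the 'youtu.be/watch?v=...'
# form listed as invalid above returns the literal path segment 'watch'
# instead of a video id.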
"content_hash": "c62499dfb2871b5f7eff17a5f5f6a56f",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 120,
"avg_line_length": 30.44871794871795,
"alnum_prop": 0.6703157894736842,
"repo_name": "kritzware/PyBot",
"id": "f32044b19df16d49606cddf010b8b68a3575c9fe",
"size": "2375",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/Sockets/SongRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "266"
},
{
"name": "HTML",
"bytes": "9466"
},
{
"name": "Python",
"bytes": "53098"
}
],
"symlink_target": ""
} |
class PuzzleNotFound(Exception):
pass
| {
"content_hash": "21455d0464bf448ecba62b17ea2b3952",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 32,
"avg_line_length": 21,
"alnum_prop": 0.7619047619047619,
"repo_name": "bbuhai/sudoku-solver",
"id": "ddc93c500fb7d09bec3607fa278420aa2cd7a4df",
"size": "44",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sudoku_solver/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20642"
}
],
"symlink_target": ""
} |
from unittest import TestCase
import json
from tornado_stub_client import stub, reset
from tornado_stub_client.collection import RequestCollection
class CommandsTest(TestCase):
""" the stub() command is actually a class who's initializer returns
itself, so we can do stub().and_return() in a ruby rspec mock fashion
"""
def setUp(self):
reset()
def test_init_stub(self):
s = stub("/hello")
self.assertTrue(isinstance(s, stub))
self.assertEqual(s.request.url, "/hello")
def test_init_add_and_return(self):
st = stub("/hello")
req = st.request
st = st.and_return(body="foobar body")
response = st.response_partial(req)
self.assertEquals(response.code, 200)
self.assertEquals(response.body, "foobar body")
self.assertEquals(response.request.url, "/hello")
def test_stub_no_return_doesnt_add_to_collection(self):
st = stub("/hello")
self.assertNotEqual(st.request, None)
resp_partial = RequestCollection.find(st.request)
self.assertEqual(resp_partial, None)
def test_stub_with_method(self):
st = stub("/hello", method="POST").and_return(body="anything")
resp_partial = RequestCollection.find(st.request)
self.assertNotEqual(resp_partial, None)
def test_return_with_body_json(self):
st = stub("/hello").and_return(body_json={'name': 'somebody'})
resp_partial = RequestCollection.find(st.request)
resp = resp_partial(st.request)
self.assertEqual(json.loads(resp.body).get('name'), 'somebody')
def test_no_body(self):
st = stub("/hello").and_return(body=None)
resp_partial = RequestCollection.find(st.request)
resp = resp_partial(st.request)
self.assertEqual(resp.body, '')
def test_no_return_args(self):
st = stub("/hello").and_return()
resp_partial = RequestCollection.find(st.request)
resp = resp_partial(st.request)
self.assertEqual(resp.body, '')
def test_set_response_code_in_stub(self):
st = stub("/hello").and_return(code=418)
resp_partial = RequestCollection.find(st.request)
resp = resp_partial(st.request)
self.assertEqual(resp.code, 418)
| {
"content_hash": "50fb85cc55198271bf4ed23b3f0df874",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 73,
"avg_line_length": 36.70967741935484,
"alnum_prop": 0.6441124780316344,
"repo_name": "dcosson/tornado-stub-client",
"id": "e692608ee5a23f02940b866bda6a16e7a2ff3c03",
"size": "2276",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12642"
}
],
"symlink_target": ""
} |
import matplotlib
import pmagpy.pmag as pmag
import wx
import copy
import os
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar
from pylab import Figure
from pmagpy.demag_gui_utilities import *
from numpy import vstack,sqrt
from functools import reduce
has_basemap=True
try: from mpl_toolkits.basemap import Basemap
except ImportError: has_basemap=False
#============================================================================================
# LOG HEADER:
#
# Dialogs boxes for demag_gui.py
#
#============================================================================================
# 9/22/2016 Version 0.2 (beta) by Kevin Gaastra
#
# 3/10/2014 Version 0.1 (beta) by Ron Shaar
#
#
#============================================================================================
#--------------------------------------------------------------
# VGP viewer
#--------------------------------------------------------------
class VGP_Dialog(wx.Dialog):
"""
"""
def __init__(self,parent,VGP_Data):
self.failed_init = False
        if not has_basemap: parent.user_warning("This feature requires the matplotlib Basemap toolkit to function. If you are running a binary, complain to a dev; they forgot to bundle all dependencies"); self.failed_init=True; return
super(VGP_Dialog, self).__init__(parent, title="VGP Viewer")
if not isinstance(VGP_Data,dict): VGP_Data={}
if VGP_Data!={} and not all([len(VGP_Data[k]) for k in list(VGP_Data.keys())]):
parent.user_warning("No VGP Data for VGP viewer to display")
self.Destroy(); self.failed_init=True; return
self.WD=parent.WD
self.test_mode=parent.test_mode
self.selected_pole = None
self.selected_pole_index = 0
self.dp_list = []
self.GUI_RESOLUTION=parent.GUI_RESOLUTION
self.VGP_Data = VGP_Data
self.init_UI()
self.fill_logger() #initialize logger
self.plot() #initialize plot
def init_UI(self):
self.panel = wx.Panel(self,-1)
#build Plot
self.fig = Figure((6*self.GUI_RESOLUTION, 3*self.GUI_RESOLUTION), dpi=100)
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.toolbar = NavigationToolbar(self.canvas)
self.toolbar.Hide()
self.toolbar.zoom()
self.plot_setting = "Zoom"
self.canvas.Bind(wx.EVT_LEFT_DCLICK,self.on_plot_select)
self.canvas.Bind(wx.EVT_MOTION,self.on_change_plot_cursor)
self.canvas.Bind(wx.EVT_MIDDLE_DOWN,self.on_home_plot)
self.canvas.Bind(wx.EVT_RIGHT_DOWN,self.on_pan_zoom_plot)
self.eqarea = self.fig.add_subplot(111)
#set map parameters
vgp_lons = [dp['vgp_lon'] for dp in self.VGP_Data['sites'] if 'vgp_lon' in dp]
self.mean_lon = sum(vgp_lons)/len(vgp_lons)
#build combobox with VGP level options
self.VGP_level = list(self.VGP_Data.keys())[0]
self.combo_box = wx.ComboBox(self.panel, -1, size=(340*self.GUI_RESOLUTION,25), value=self.VGP_level, choices=sorted(self.VGP_Data.keys()), style=wx.CB_DROPDOWN, name="vgp_level")
self.Bind(wx.EVT_COMBOBOX, self.on_level_box, self.combo_box)
#build logger
self.logger = wx.ListCtrl(self.panel, -1, size=(340*self.GUI_RESOLUTION,240*self.GUI_RESOLUTION), style=wx.LC_REPORT)
self.logger.InsertColumn(0, 'element', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(1, 'fit name', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(2, 'lat', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(3, 'lon', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(4, 'dp', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(5, 'dm', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(6, 'n', width=50*self.GUI_RESOLUTION)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.on_click_listctrl, self.logger)
hbox0 = wx.BoxSizer(wx.HORIZONTAL)
vbox0 = wx.BoxSizer(wx.VERTICAL)
vbox0.Add(self.combo_box,proportion=0,flag=wx.ALIGN_TOP|wx.ALL,border=8)
vbox0.Add(self.logger,proportion=1,flag=wx.ALIGN_TOP|wx.ALL,border=8)
hbox0.Add(vbox0,proportion=0,flag=wx.ALIGN_TOP|wx.ALL,border=8)
hbox0.Add(self.canvas,proportion=1,flag=wx.ALIGN_TOP|wx.ALL,border=8)
self.panel.SetSizer(hbox0)
hbox0.Fit(self)
#set hotkeys
randomId = wx.NewId()
randomId2 = wx.NewId()
self.Bind(wx.EVT_MENU, self.on_exit_hk, id=randomId)
self.Bind(wx.EVT_MENU, self.save_plot, id=randomId2)
accel_tbl = wx.AcceleratorTable([(wx.ACCEL_CTRL, ord('Q'), randomId ),(wx.ACCEL_CTRL, ord('S'), randomId2 )])
self.SetAcceleratorTable(accel_tbl)
def on_exit_hk(self,event):
self.Close()
def save_plot(self,event):
SaveMyPlot(self.fig,self.VGP_level,"VGPPlot",self.WD,test_mode=self.test_mode)
def on_plot_select(self,event):
"""
Select data point if cursor is in range of a data point
@param: event -> the wx Mouseevent for that click
"""
if not self.xdata or not self.ydata: return
pos=event.GetPosition()
width, height = self.canvas.get_width_height()
pos[1] = height - pos[1]
xpick_data,ypick_data = pos
xdata_org = self.xdata
ydata_org = self.ydata
data_corrected = self.eqarea.transData.transform(vstack([xdata_org,ydata_org]).T)
xdata,ydata = data_corrected.T
xdata = list(map(float,xdata))
ydata = list(map(float,ydata))
e = 4e0
        index = None
        for i,(x,y) in enumerate(zip(xdata,ydata)):
            if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
                index = i
                break
        if index is None:
            return
        self.change_selected(index)
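    # Hit-test sketch (illustrative): the VGP points are kept in data
    # coordinates, so both this handler and on_change_plot_cursor() map them to
    # display pixels with ``self.eqarea.transData.transform`` and accept the
    # click when its pixel distance to a point is under ``e = 4`` pixels:
    #
    #     d = sqrt((x - xpick_data)**2 + (y - ypick_data)**2)   # in pixels
    #     if 0 < d < e: ...that pole is selected...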
def on_change_plot_cursor(self,event):
"""
If mouse is over data point making it selectable change the shape of the cursor
@param: event -> the wx Mouseevent for that click
"""
if not self.xdata or not self.ydata: return
pos=event.GetPosition()
width, height = self.canvas.get_width_height()
pos[1] = height - pos[1]
xpick_data,ypick_data = pos
xdata_org = self.xdata
ydata_org = self.ydata
data_corrected = self.eqarea.transData.transform(vstack([xdata_org,ydata_org]).T)
xdata,ydata = data_corrected.T
xdata = list(map(float,xdata))
ydata = list(map(float,ydata))
e = 4e0
if self.plot_setting == "Zoom":
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_CROSS))
else:
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_ARROW))
for i,(x,y) in enumerate(zip(xdata,ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_HAND))
break
event.Skip()
def on_home_plot(self,event):
self.toolbar.home()
def on_pan_zoom_plot(self,event):
if event.LeftIsDown():
return
elif self.plot_setting == "Zoom":
self.plot_setting = "Pan"
try: self.toolbar.pan('off')
except TypeError: print('error in changing plot function to pan')
elif self.plot_setting == "Pan":
self.plot_setting = "Zoom"
try: self.toolbar.zoom()
except TypeError: print('error in changing plot function to zoom')
def on_level_box(self,event):
self.VGP_level=self.combo_box.GetValue()
self.fill_logger(); self.plot()
def draw_map(self):
#set basemap
self.map = Basemap(projection='moll',lon_0=0,resolution='c',ax=self.eqarea)
self.map.drawcoastlines(linewidth=.25)
self.map.fillcontinents(color='bisque',lake_color='white',zorder=1)
self.map.drawmapboundary(fill_color='white')
self.map.drawmeridians(list(range(0,390,30)))
self.map.drawparallels(list(range(-90,120,30)))
def plot(self):
self.eqarea.clear()
self.xdata,self.ydata = [],[]
data = self.VGP_Data[self.VGP_level]
self.draw_map()
ymin, ymax = self.eqarea.get_ylim()
xmin, xmax = self.eqarea.get_xlim()
for dp in data:
lat,lon = dp['vgp_lat'],dp['vgp_lon']
XYM=self.map(float(lon),float(lat))
FC=dp['color'];EC=dp['color']
if self.selected_pole==dp['name']+dp['comp_name']: marker='D'
else: marker='o'
self.map.scatter([XYM[0]],[XYM[1]],marker=marker,edgecolor=EC, facecolor=FC,s=30,lw=1,clip_on=False,zorder=2)
self.xdata.append(XYM[0]);self.ydata.append(XYM[1])
#consider adding ellipse for uncertinties
self.eqarea.set_xlim(xmin, xmax)
self.eqarea.set_ylim(ymin, ymax)
self.canvas.draw()
def fill_logger(self):
self.logger.DeleteAllItems(); self.dp_list = []
data = self.VGP_Data[self.VGP_level]
for i,dp in enumerate(data): self.update_logger_entry(i,dp)
def update_logger_entry(self,i,pars):
if len(self.dp_list)>i:
self.dp_list.pop(i)
self.dp_list.insert(i,pars['name']+pars['comp_name'])
if i < self.logger.GetItemCount():
self.logger.DeleteItem(i)
self.logger.InsertItem(i, str(pars['name']))
self.logger.SetItem(i, 1, str(pars['comp_name']))
self.logger.SetItem(i, 2, str(pars['vgp_lat']))
self.logger.SetItem(i, 3, str(pars['vgp_lon']))
self.logger.SetItem(i, 4, str(pars['vgp_dp']))
self.logger.SetItem(i, 5, str(pars['vgp_dm']))
self.logger.SetItem(i, 6, str(pars['n']))
self.logger.SetItemBackgroundColour(i,"WHITE")
if self.selected_pole_index==i:
self.selected_pole=pars['name']+pars['comp_name']
self.logger.SetItemBackgroundColour(i,"LIGHT BLUE")
def change_selected(self,i):
old_pole_index = self.selected_pole_index
self.selected_pole_index = i
self.logger.SetItemBackgroundColour(old_pole_index,"WHITE")
self.logger.SetItemBackgroundColour(self.selected_pole_index,"LIGHT BLUE")
self.selected_pole = self.dp_list[self.selected_pole_index]
self.plot()
def on_click_listctrl(self,event):
self.change_selected(event.GetIndex())
#--------------------------------------------------------------
# Save plots
#--------------------------------------------------------------
class SaveMyPlot(wx.Frame):
""""""
def __init__(self,fig,name,plot_type,dir_path,test_mode=False):
"""Constructor"""
wx.Frame.__init__(self, parent=None, title="")
file_choices="(*.pdf)|*.pdf|(*.svg)|*.svg| (*.png)|*.png"
default_fig_name="%s_%s.pdf"%(name,plot_type)
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=dir_path,
defaultFile=default_fig_name,
wildcard=file_choices,
style=wx.SAVE)
dlg.Center()
if test_mode: result=dlg.GetAffirmativeId()
else: result=dlg.ShowModal()
if result == wx.ID_OK:
path = dlg.GetPath()
else:
return
title=name
self.panel = wx.Panel(self)
self.dpi=300
canvas_tmp_1 = FigCanvas(self.panel, -1, fig)
canvas_tmp_1.print_figure(path, dpi=self.dpi)
#--------------------------------------------------------------
# MagIC results tables dialog
#--------------------------------------------------------------
class magic_pmag_specimens_table_dialog(wx.Dialog):
def __init__(self,parent):
super(magic_pmag_specimens_table_dialog, self).__init__(parent, title="MagIC specimens table Dialog")
self.InitUI()
def InitUI(self):
pnl1 = wx.Panel(self)
vbox = wx.StaticBoxSizer(wx.StaticBox( pnl1, wx.ID_ANY, "MagIC result tables options" ), wx.VERTICAL)
#---------------------
# Acceptance criteria
#---------------------
#self.acceptance_criteria_text=wx.StaticText(pnl1,label="apply acceptance criteria from pmag_criteria.txt:",style=wx.TE_CENTER)
#self.cb_acceptance_criteria= wx.CheckBox(pnl1, -1, 'apply acceptance criteria from pmag_criteria.txt', (10, 30))
#---------------------
# choose coordinate system
#---------------------
self.coor_text=wx.StaticText(pnl1,label="choose which coordinate systems to save in specimens table:",style=wx.TE_CENTER)
#self.rb_spec_coor = wx.RadioButton(pnl1, -1, 'specimen', (10, 10), style=wx.RB_GROUP)
#self.rb_geo_coor = wx.RadioButton(pnl1, -1, 'geographic', (10, 30))
#self.rb_tilt_coor = wx.RadioButton(pnl1, -1, 'tilt-corrected', (10, 30))
self.cb_spec_coor = wx.CheckBox(pnl1, -1, label='specimen')
self.cb_geo_coor = wx.CheckBox(pnl1, -1, label='geographic')
self.cb_tilt_coor = wx.CheckBox(pnl1, -1, label='tilt-corrected')
#self.rb_geo_tilt_coor = wx.RadioButton(pnl1, -1, 'geographic and tilt-corrected', (10, 30))
self.cb_spec_coor.SetValue(True)
self.cb_geo_coor.SetValue(False)
self.cb_tilt_coor.SetValue(False)
#self.rb_geo_coor.SetValue(True)
#self.rb_tilt_coor.SetValue(True)
#self.rb_geo_tilt_coor.SetValue(True)
coordinates_window = wx.GridSizer(1, 3, 6, 6)
coordinates_window.AddMany( [(self.cb_spec_coor),
(self.cb_geo_coor),
(self.cb_tilt_coor)])
#(self.rb_geo_tilt_coor)])
#---------------------
# OK/Cancel buttons
#---------------------
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(pnl1, wx.ID_OK, "&OK")
self.cancelButton = wx.Button(pnl1, wx.ID_CANCEL, '&Cancel')
hboxok.Add(self.okButton)
hboxok.AddSpacer(20)
hboxok.Add(self.cancelButton )
#---------------------
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(self.coor_text,flag=wx.ALIGN_CENTER_HORIZONTAL, border=100)
vbox.AddSpacer(10)
vbox.Add(coordinates_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
#-------------
vbox1=wx.BoxSizer(wx.VERTICAL)
vbox1.AddSpacer(10)
vbox1.Add(vbox)
vbox1.AddSpacer(10)
vbox1.Add(hboxok,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox1.AddSpacer(10)
pnl1.SetSizer(vbox1)
vbox1.Fit(self)
#--------------------------------------------------------------
# No Lat, Lon for VGP dialog
#--------------------------------------------------------------
class user_input(wx.Dialog):
"""
    Generic user input dialog that collects any set of inputs through a series of TextCtrls
"""
def __init__(self,parent,inputs,parse_funcs=[],heading=None,title="User Input Required",values=[]):
"""
@param: parent - the wx.Frame calling the dialog
@param: inputs - a list of strings giving the names of the inputs wanted
@param: parse_funcs - a list of the functions to apply to inputs, None for any entry will result in return of raw input.
@param: heading - string giving the heading for the dialog if None a default heading will be constructed
"""
super(user_input, self).__init__(parent, title=title)
self.inputs = inputs
self.parse_funcs = parse_funcs
self.InitUI(heading,values=values)
def InitUI(self,heading,values=[]):
#make header and panel
pnl1 = wx.Panel(self)
if heading == None:
heading = "User Input required for values: " + reduce(lambda x,y: x+','+y, self.inputs)
vbox = wx.StaticBoxSizer(wx.StaticBox(pnl1, wx.ID_ANY,heading), wx.VERTICAL)
#make inputs
list_ctrls_for_window=[]
self.list_ctrls=[]
if len(values) != len(self.inputs): values = ['' for _ in range(len(self.inputs))]
for inp,val in zip(self.inputs,values):
list_ctrls_for_window.append((wx.StaticText(pnl1,label=inp,style=wx.TE_CENTER), wx.EXPAND))
self.list_ctrls.append(wx.TextCtrl(pnl1,value=str(val),style=wx.TE_CENTER,size=(200,20)))
list_ctrls_for_window.append(self.list_ctrls[-1])
ctrl_window = wx.GridSizer(2, len(self.list_ctrls), 6, 6)
ctrl_window.AddMany(list_ctrls_for_window)
#make okay and cancel buttons
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(pnl1, wx.ID_OK, "&OK")
self.cancelButton = wx.Button(pnl1, wx.ID_CANCEL, '&Cancel')
hboxok.Add(self.okButton)
hboxok.AddSpacer(20)
hboxok.Add(self.cancelButton)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(ctrl_window, 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(hboxok, 0, wx.ALL|wx.EXPAND, 5)
pnl1.SetSizer(vbox)
vbox.Fit(self)
def get_values(self):
"""
        Applies the parsing functions given in __init__ to each input, then returns a tuple whose first entry is a boolean indicating whether the user filled in all values and whose second entry is a dictionary mapping input names to parsed values.
"""
return_dict = {}
for i,ctrl in enumerate(self.list_ctrls):
if hasattr(self.parse_funcs,'__getitem__') and len(self.parse_funcs)>i and hasattr(self.parse_funcs[i],'__call__'):
try: return_dict[self.inputs[i]] = self.parse_funcs[i](ctrl.GetValue())
except: return_dict[self.inputs[i]] = ctrl.GetValue()
else:
return_dict[self.inputs[i]] = ctrl.GetValue()
return ('' not in list(return_dict.values()), return_dict)
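    # Usage sketch (illustrative; the field names are hypothetical): to ask for
    # a numeric latitude and longitude with defaults,
    #
    #     dia = user_input(parent, ['lat', 'lon'], parse_funcs=[float, float],
    #                      heading="Enter site location", values=[0.0, 0.0])
    #     if dia.ShowModal() == wx.ID_OK:
    #         complete, vals = dia.get_values()
    #         # e.g. complete == True, vals == {'lat': 12.5, 'lon': -45.0}
    #
    # Entries whose parse function raises are returned as the raw string.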
#--------------------------------------------------------------
# MagIC results tables dialog
#--------------------------------------------------------------
class magic_pmag_tables_dialog(wx.Dialog):
def __init__(self,parent,WD,Data,Data_info):
super(magic_pmag_tables_dialog, self).__init__(parent, title="MagIC results table Dialog")
self.InitUI()
def InitUI(self):
pnl1 = wx.Panel(self)
vbox = wx.StaticBoxSizer(wx.StaticBox( pnl1, wx.ID_ANY, "MagIC result tables options" ), wx.VERTICAL)
#---------------------
# Acceptance criteria
#---------------------
#self.acceptance_criteria_text=wx.StaticText(pnl1,label="apply acceptance criteria from pmag_criteria.txt:",style=wx.TE_CENTER)
self.cb_acceptance_criteria= wx.CheckBox(pnl1, -1, 'apply acceptance criteria from pmag_criteria.txt', (10, 30))
#---------------------
# choose coordinate system
#---------------------
self.coor_text=wx.StaticText(pnl1,label="coordinate system:",style=wx.TE_CENTER)
self.rb_spec_coor = wx.RadioButton(pnl1, -1, 'specimen', (10, 10), style=wx.RB_GROUP)
self.rb_geo_coor = wx.RadioButton(pnl1, -1, 'geographic', (10, 30))
self.rb_tilt_coor = wx.RadioButton(pnl1, -1, 'tilt-corrected', (10, 30))
self.rb_geo_tilt_coor = wx.RadioButton(pnl1, -1, 'geographic and tilt-corrected', (10, 30))
self.rb_geo_coor.SetValue(True)
coordinates_window = wx.GridSizer(1, 4, 6, 6)
coordinates_window.AddMany( [(self.rb_spec_coor),
(self.rb_geo_coor),
(self.rb_tilt_coor),
(self.rb_geo_tilt_coor)])
#---------------------
# default age
#---------------------
self.default_age_text=wx.StaticText(pnl1,label="default age if site age does not exist in er_ages.txt:",style=wx.TE_CENTER)
self.cb_default_age = wx.CheckBox(pnl1, -1, 'use default age', (10, 30))
self.default_age_min=wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))
self.default_age_max=wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))
age_unit_choices=['Years Cal BP','Years Cal AD (+/-)','Years BP','Years AD (+/-)','Ma','Ka','Ga']
self.default_age_unit=wx.ComboBox(pnl1, -1,size=(150, -1), value = '', choices=age_unit_choices, style=wx.CB_READONLY)
default_age_window = wx.GridSizer(2, 4, 6, 6)
default_age_window.AddMany( [(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="younger bound",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="older bound",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="units",style=wx.TE_CENTER), wx.EXPAND),
(self.cb_default_age,wx.EXPAND),
(self.default_age_min,wx.EXPAND),
(self.default_age_max,wx.EXPAND),
(self.default_age_unit,wx.EXPAND)])
#---------------------
# sample
#---------------------
self.cb_sample_mean=wx.CheckBox(pnl1, -1, 'calculate sample mean ', (10, 30))
self.Bind(wx.EVT_CHECKBOX,self.on_change_cb_sample_mean,self.cb_sample_mean)
self.cb_sample_mean.SetValue(False)
sample_mean_choices=['specimens']
self.combo_sample_mean=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'specimens', choices=sample_mean_choices, style=wx.CB_READONLY)
sample_mean_types=['Fisher']
self.combo_sample_type=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'Fisher', choices=sample_mean_types, style=wx.CB_READONLY)
self.cb_sample_mean_VGP=wx.CheckBox(pnl1, -1, 'calculate sample VGP', (10, 30))
self.cb_sample_mean_VGP.SetValue(False)
self.Bind(wx.EVT_CHECKBOX,self.on_change_cb_sample_mean_VGP,self.cb_sample_mean_VGP)
sample_mean_window = wx.GridSizer(2, 4, 6, 6)
sample_mean_window.AddMany( [(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="average sample by:",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="calculation type",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(self.cb_sample_mean,wx.EXPAND),
(self.combo_sample_mean,wx.EXPAND),
(self.combo_sample_type,wx.EXPAND),
(self.cb_sample_mean_VGP,wx.EXPAND)])
#---------------------
# site
#---------------------
self.cb_site_mean=wx.CheckBox(pnl1, -1, 'calculate site mean ', (10, 30))
self.cb_site_mean.SetValue(True)
site_mean_choices=['specimens','samples']
self.combo_site_mean=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'specimens', choices=site_mean_choices, style=wx.CB_READONLY)
self.Bind(wx.EVT_COMBOBOX,self.on_change_site_mean,self.combo_site_mean)
site_mean_types=['Fisher']
self.combo_site_type=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'Fisher', choices=site_mean_types, style=wx.CB_READONLY)
self.cb_site_mean_VGP=wx.CheckBox(pnl1, -1, 'calculate site VGP', (10, 30))
self.cb_site_mean_VGP.SetValue(True)
self.Bind(wx.EVT_CHECKBOX,self.on_change_cb_site_mean_VGP,self.cb_site_mean_VGP)
site_mean_window = wx.GridSizer(2, 4, 6, 6)
site_mean_window.AddMany( [(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="average site by:",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="calculation type",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(self.cb_site_mean,wx.EXPAND),
(self.combo_site_mean,wx.EXPAND),
(self.combo_site_type,wx.EXPAND),
(self.cb_site_mean_VGP,wx.EXPAND)])
#---------------------
# location
#---------------------
self.cb_location_mean=wx.CheckBox(pnl1, -1, 'calculate location mean', (10, 30))
self.cb_location_mean.SetValue(False)
location_mean_choices=['sites']
self.combo_location_mean=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'sites', choices=location_mean_choices, style=wx.CB_READONLY)
location_mean_types=['Fisher-separate polarities']
self.combo_loction_type=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'Fisher-separate polarities', choices=location_mean_types, style=wx.CB_READONLY)
self.cb_location_mean_VGP=wx.CheckBox(pnl1, -1, 'calculate location VGP', (10, 30))
self.cb_location_mean_VGP.SetValue(True)
#self.Bind(wx.EVT_CHECKBOX,self.on_change_cb_location_mean_VGP,self.cb_location_mean_VGP)
loaction_mean_window = wx.GridSizer(2, 4, 6, 6)
loaction_mean_window.AddMany( [(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="average location by:",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="calculation type",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(self.cb_location_mean,wx.EXPAND),
(self.combo_location_mean,wx.EXPAND),
(self.combo_loction_type,wx.EXPAND),
(self.cb_location_mean_VGP,wx.EXPAND)])
#---------------------
# OK/Cancel buttons
#---------------------
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(pnl1, wx.ID_OK, "&OK")
self.cancelButton = wx.Button(pnl1, wx.ID_CANCEL, '&Cancel')
hboxok.Add(self.okButton)
hboxok.AddSpacer(20)
hboxok.Add(self.cancelButton)
#---------------------
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(self.cb_acceptance_criteria,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(self.coor_text,flag=wx.ALIGN_CENTER_HORIZONTAL, border=100)
vbox.AddSpacer(10)
vbox.Add(coordinates_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(self.default_age_text,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(default_age_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(sample_mean_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(site_mean_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(loaction_mean_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
#-------------
vbox1=wx.BoxSizer(wx.VERTICAL)
vbox1.AddSpacer(10)
vbox1.Add(vbox)
vbox1.AddSpacer(10)
vbox1.Add(hboxok,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox1.AddSpacer(10)
pnl1.SetSizer(vbox1)
vbox1.Fit(self)
def on_change_cb_sample_mean_VGP(self,event):
if self.cb_sample_mean_VGP.GetValue()==True:
self.cb_site_mean_VGP.SetValue(False)
def on_change_cb_site_mean_VGP(self,event):
if self.cb_site_mean_VGP.GetValue()==True:
self.cb_sample_mean_VGP.SetValue(False)
def on_change_cb_location_mean_VGP(self,event):
if self.cb_location_mean_VGP.GetValue()==True:
self.cb_location_mean_VGP.SetValue(False)
def on_change_cb_sample_mean(self,event):
if self.combo_site_mean.GetValue()=='samples' and not self.cb_sample_mean.GetValue():
self.combo_site_mean.SetValue('specimens')
def on_change_site_mean(self,event):
if self.combo_site_mean.GetValue()=='samples' and not self.cb_sample_mean.GetValue():
self.cb_sample_mean.SetValue(True)
#--------------------------------------------------------------
# MagIC results tables dialog
#--------------------------------------------------------------
#--------------------------------------------------------------
# MagIC generic files conversion
#--------------------------------------------------------------
class convert_generic_files_to_MagIC(wx.Frame):
""""""
title = "PmagPy Thellier GUI generic file conversion"
def __init__(self,WD):
wx.Frame.__init__(self, None, wx.ID_ANY, self.title)
self.panel = wx.Panel(self)
#self.MakeModal(True)
self.max_files=10
self.WD=WD
self.InitUI()
self.END=False
def InitUI(self):
pnl = self.panel
        #---sizer info ----
TEXT=[]
TEXT.append("A generic file is a tab-delimited file with the following headers:\n")
TEXT.append("specimen treatment step moment dec_s inc_s dec_g inc_g dec_t inc_t \n")
TEXT.append("treatment: N [NRM], A[AF] T[Thermal].\n")
TEXT.append("step: if treatment=N: should be 0.\n")
TEXT.append("step: if treatment=A: peak field in mT.\n")
TEXT.append("step: if treatment=T: Temperature in C.\n")
TEXT.append("step: if treatment=N: peak field in mT.\n")
TEXT.append("moment: magnetic moment in units of emu.\n")
TEXT.append("dec_s inc_s: declination/inclination in specimen coordinates\n" )
TEXT.append("dec_g inc_g: declination/inclination in geographic coordinates\n")
TEXT.append("dec_t inc_t: declination/inclination in tilt corrected coordinates\n")
TEXT.append("\n At least one set of dec/inc is required.\n")
TEXT.append("\n The order of the columns is not important.\n")
STRING="".join(TEXT)
bSizer_info = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.HORIZONTAL )
bSizer_info.Add(wx.StaticText(pnl,label=STRING),wx.ALIGN_LEFT)
#---sizer 0 ----
TEXT="file:\n choose measurement file\n no spaces are allowed in path"
bSizer0 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer0.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
bSizer0.AddSpacer(5)
for i in range(self.max_files):
command= "self.file_path_%i = wx.TextCtrl(self.panel, id=-1, size=(200,25), style=wx.TE_READONLY)"%i
exec(command)
command= "self.add_file_button_%i = wx.Button(self.panel, id=-1, label='add',name='add_%i')"%(i,i)
exec(command)
command= "self.Bind(wx.EVT_BUTTON, self.on_add_file_button_i, self.add_file_button_%i)"%i
#print command
exec(command)
command="bSizer0_%i = wx.BoxSizer(wx.HORIZONTAL)"%i
exec(command)
command="bSizer0_%i.Add(wx.StaticText(pnl,label=('%i '[:2])),wx.ALIGN_LEFT)"%(i,i+1)
exec(command)
command="bSizer0_%i.Add(self.file_path_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer0_%i.Add(self.add_file_button_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer0.Add(bSizer0_%i,wx.ALIGN_TOP)" %i
exec(command)
bSizer0.AddSpacer(5)
# #---sizer 1 ----
#
# TEXT="\n\nExperiment:"
# bSizer1 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
# bSizer1.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
# self.experiments_names=['IZZI','IZ','ZI','ATRM 6 positions','cooling rate','NLT']
# bSizer1.AddSpacer(5)
# for i in range(self.max_files):
# command="self.protocol_info_%i = wx.ComboBox(self.panel, -1, self.experiments_names[0], size=(100,25), choices=self.experiments_names, style=wx.CB_DROPDOWN)"%i
# #command="self.protocol_info_%i = wx.TextCtrl(self.panel, id=-1, size=(100,20), style=wx.TE_MULTILINE | wx.HSCROLL)"%i
# #print command
# exec command
# command="bSizer1.Add(self.protocol_info_%i,wx.ALIGN_TOP)"%i
# exec command
# bSizer1.AddSpacer(5)
#---sizer 2 ----
#TEXT="Blab:\n(microT dec inc)\nexample: 40 0 -90 "
#bSizer2 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
#bSizer2.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
#bSizer2.AddSpacer(5)
#for i in range(self.max_files):
# command= "self.file_info_Blab_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
# exec command
# command= "self.file_info_Blab_dec_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
# exec command
# command= "self.file_info_Blab_inc_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
# exec command
# command="bSizer2_%i = wx.BoxSizer(wx.HORIZONTAL)"%i
# exec command
# command="bSizer2_%i.Add(self.file_info_Blab_%i ,wx.ALIGN_LEFT)" %(i,i)
# exec command
# command="bSizer2_%i.Add(self.file_info_Blab_dec_%i,wx.ALIGN_LEFT)" %(i,i)
# exec command
# command="bSizer2_%i.Add(self.file_info_Blab_inc_%i,wx.ALIGN_LEFT)" %(i,i)
# exec command
# command="bSizer2.Add(bSizer2_%i,wx.ALIGN_TOP)" %i
# exec command
# bSizer2.AddSpacer(5)
#self.blab_info = wx.TextCtrl(self.panel, id=-1, size=(80,250), style=wx.TE_MULTILINE | wx.HSCROLL)
#bSizer2.Add(self.blab_info,wx.ALIGN_TOP)
#---sizer 3 ----
TEXT="\nUser\nname:"
bSizer3 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer3.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
bSizer3.AddSpacer(5)
for i in range(self.max_files):
command= "self.file_info_user_%i = wx.TextCtrl(self.panel, id=-1, size=(60,25))"%i
exec(command)
command="bSizer3.Add(self.file_info_user_%i,wx.ALIGN_TOP)" %i
exec(command)
bSizer3.AddSpacer(5)
#---sizer 4 ----
TEXT="\nsample-specimen\nnaming convention:"
bSizer4 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer4.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
self.sample_naming_conventions=['sample=specimen','no. of terminate characters','charceter delimited']
bSizer4.AddSpacer(5)
for i in range(self.max_files):
command="self.sample_naming_convention_%i = wx.ComboBox(self.panel, -1, self.sample_naming_conventions[0], size=(180,25), choices=self.sample_naming_conventions, style=wx.CB_DROPDOWN)"%i
exec(command)
command="self.sample_naming_convention_char_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
exec(command)
command="bSizer4_%i = wx.BoxSizer(wx.HORIZONTAL)"%i
exec(command)
command="bSizer4_%i.Add(self.sample_naming_convention_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer4_%i.Add(self.sample_naming_convention_char_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer4.Add(bSizer4_%i,wx.ALIGN_TOP)"%i
exec(command)
bSizer4.AddSpacer(5)
#---sizer 5 ----
TEXT="\nsite-sample\nnaming convention:"
bSizer5 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer5.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
self.site_naming_conventions=['site=sample','no. of terminate characters','charceter delimited']
bSizer5.AddSpacer(5)
for i in range(self.max_files):
command="self.site_naming_convention_char_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
exec(command)
command="self.site_naming_convention_%i = wx.ComboBox(self.panel, -1, self.site_naming_conventions[0], size=(180,25), choices=self.site_naming_conventions, style=wx.CB_DROPDOWN)"%i
exec(command)
command="bSizer5_%i = wx.BoxSizer(wx.HORIZONTAL)"%i
exec(command)
command="bSizer5_%i.Add(self.site_naming_convention_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer5_%i.Add(self.site_naming_convention_char_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer5.Add(bSizer5_%i,wx.ALIGN_TOP)"%i
exec(command)
bSizer5.AddSpacer(5)
#---sizer 6 ----
TEXT="\n\nlocation:"
bSizer6 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer6.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
bSizer6.AddSpacer(5)
for i in range(self.max_files):
command= "self.file_info_location_%i = wx.TextCtrl(self.panel, id=-1, size=(60,25))"%i
exec(command)
command="bSizer6.Add(self.file_info_location_%i,wx.ALIGN_TOP)" %i
exec(command)
bSizer6.AddSpacer(5)
#------------------
#self.add_file_button = wx.Button(self.panel, id=-1, label='add file')
#self.Bind(wx.EVT_BUTTON, self.on_add_file_button, self.add_file_button)
#self.remove_file_button = wx.Button(self.panel, id=-1, label='remove file')
self.okButton = wx.Button(self.panel, wx.ID_OK, "&OK")
self.Bind(wx.EVT_BUTTON, self.on_okButton, self.okButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
#hbox1.Add(self.add_file_button)
#hbox1.Add(self.remove_file_button )
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox2.Add(self.okButton)
hbox2.Add(self.cancelButton )
#------
vbox=wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.AddSpacer(5)
hbox.Add(bSizer0, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(5)
#hbox.Add(bSizer1, flag=wx.ALIGN_LEFT)
#hbox.AddSpacer(5)
#hbox.Add(bSizer2, flag=wx.ALIGN_LEFT)
#hbox.AddSpacer(5)
hbox.Add(bSizer3, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(5)
hbox.Add(bSizer4, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(5)
hbox.Add(bSizer5, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(5)
hbox.Add(bSizer6, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(5)
#-----
vbox.AddSpacer(20)
vbox.Add(bSizer_info,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(20)
vbox.Add(hbox)
vbox.AddSpacer(20)
vbox.Add(hbox1,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(20)
vbox.Add(hbox2,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(20)
self.panel.SetSizer(vbox)
vbox.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
dlg = wx.FileDialog(
None,message="choose file to convert to MagIC",
defaultDir=self.WD,
defaultFile="",
style=wx.OPEN | wx.CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
FILE = dlg.GetPath()
# fin=open(FILE,'r')
self.file_path.AppendText(FILE+"\n")
self.protocol_info.AppendText("IZZI"+"\n")
def on_add_file_button_i(self,event):
dlg = wx.FileDialog(
None,message="choose file to convert to MagIC",
defaultDir="./",
defaultFile="",
style=wx.OPEN | wx.CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
FILE = dlg.GetPath()
# fin=open(FILE,'r')
button = event.GetEventObject()
name=button.GetName()
i=int((name).split("_")[-1])
#print "The button's name is " + button.GetName()
command="self.file_path_%i.SetValue(FILE)"%i
exec(command)
#self.file_path.AppendText(FILE)
#self.protocol_info.AppendText("IZZI"+"\n")
def read_generic_file(self,path):
Data={}
if str(path)=="":
return ({})
Fin=open(str(path),'r')
header=Fin.readline().strip('\n').split('\t')
for line in Fin.readlines():
tmp_data={}
l=line.strip('\n').split('\t')
if len(l)<len(header):
continue
else:
for i in range(len(header)):
tmp_data[header[i]]=l[i]
specimen=tmp_data['specimen']
if specimen not in list(Data.keys()):
Data[specimen]=[]
                # check duplicates
if len(Data[specimen]) >0:
if tmp_data['treatment']==Data[specimen][-1]['treatment']:
if tmp_data['step']==Data[specimen][-1]['step']:
print("-W- WARNING: duplicate measurements specimen %s, Treatment %s:%s. keeping onlt the last one"%(tmp_data['specimen'],tmp_data['treatment'],tmp_data['step']))
Data[specimen].pop()
Data[specimen].append(tmp_data)
return(Data)
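    # Illustrative sketch of the expected input (tab-delimited; the values are
    # made up) and of the structure read_generic_file() builds from it:
    #
    #     specimen  treatment  step  moment    dec_s   inc_s
    #     spc01     N          0     2.3e-05   355.1   42.0
    #     spc01     A          10    1.9e-05   354.8   41.2
    #
    #     {'spc01': [{'specimen': 'spc01', 'treatment': 'N', 'step': '0', ...},
    #                {'specimen': 'spc01', 'treatment': 'A', 'step': '10', ...}]}
    #
    # Consecutive lines repeating the same treatment and step are collapsed,
    # keeping only the last one.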
def on_okButton(self,event):
#-----------------------------------
# Prepare MagIC measurement file
#-----------------------------------
# prepare output file
#magic_headers=['er_citation_names','er_specimen_name',"er_sample_name","er_site_name",'er_location_name','er_analyst_mail_names',\
# "magic_instrument_codes","measurement_flag","measurement_standard","magic_experiment_name","magic_method_codes","measurement_number",'treatment_temp',"measurement_dec","measurement_inc",\
# "measurement_magn_moment","measurement_temp","treatment_dc_field","treatment_dc_field_phi","treatment_dc_field_theta"]
#fout=open("magic_measurements.txt",'w')
#fout.write("tab\tmagic_measurements\n")
#header_string=""
#for i in range(len(magic_headers)):
# header_string=header_string+magic_headers[i]+"\t"
#fout.write(header_string[:-1]+"\n")
#-----------------------------------
os.chdir(self.WD)
Data={}
header_codes=[]
ERROR=""
datafiles=[]
MagRecs=[]
self.er_sample_data={}
try:
self.er_sample_data=self.read_magic_file(os.path.join(self.WD, "er_samples.txt"), 'er_sample_name')
except:
print("-W- WARNING: Cant find er_samples.txt table")
for i in range(self.max_files):
# read data from generic file
datafile=""
command="datafile=self.file_path_%i.GetValue()"%i
exec(command)
#if datafile!="":
# try:
# this_file_data= self.read_generic_file(datafile)
# except:
# print "-E- Cant read file %s" %datafile
#else:
# continue
this_file_data= self.read_generic_file(datafile)
#print "datafile",datafile
#print "this_file_data",this_file_data
# get experiment
#command="experiment=self.protocol_info_%i.GetValue()"%i
#exec command
# get Blab
#labfield=["0","-1","-1"]
#command="labfield[0]=self.file_info_Blab_%i.GetValue()"%i
#exec command
#command="labfield[1]=self.file_info_Blab_dec_%i.GetValue()"%i
#exec command
#command="labfield[2]=self.file_info_Blab_inc_%i.GetValue()"%i
#exec command
# get User_name
user_name=""
command="user_name=self.file_info_user_%i.GetValue()"%i
exec(command)
# get sample-specimen naming convention
sample_naming_convenstion=["",""]
command="sample_naming_convenstion[0]=self.sample_naming_convention_%i.GetValue()"%i
exec(command)
command="sample_naming_convenstion[1]=self.sample_naming_convention_char_%i.GetValue()"%i
exec(command)
# get site-sample naming convention
site_naming_convenstion=["",""]
command="site_naming_convenstion[0]=self.site_naming_convention_%i.GetValue()"%i
exec(command)
command="site_naming_convenstion[1]=self.site_naming_convention_char_%i.GetValue()"%i
exec(command)
# get location
location_name=""
command="location_name=self.file_info_location_%i.GetValue()"%i
exec(command)
# read er_samples.txt
# to check for sample orientation data and tilt-corrected data
ErSamplesRecs=[]
for specimen in list(this_file_data.keys()):
measurement_running_number=0
this_specimen_LT=[]
this_specimen_LP=[]
MagRecs_this_specimen=[]
for meas_line in this_file_data[specimen]:
MagRec={}
#
MagRec["er_specimen_name"]=meas_line['specimen']
MagRec['er_citation_names']="This study"
MagRec["er_sample_name"]=self.get_sample_name(MagRec["er_specimen_name"],sample_naming_convenstion)
MagRec["er_site_name"]=self.get_site_name(MagRec["er_sample_name"],site_naming_convenstion)
MagRec["er_location_name"]=location_name
MagRec['er_analyst_mail_names']=user_name
MagRec["magic_instrument_codes"]=""
MagRec["measurement_flag"]='g'
MagRec["measurement_number"]="%i"%measurement_running_number
MagRec["measurement_temp"]='273.' # room temp in kelvin
MagRec["measurement_standard"]="u"
#-----
MagRec["measurement_magn_moment"]='%10.3e'%(float(meas_line["moment"])*1e-3) # convert to Am^2
# see if core azimuth and tilt-corrected data are in er_samples.txt
sample=MagRec["er_sample_name"]
found_sample_azimuth,found_sample_dip,found_sample_bed_dip_direction,found_sample_bed_dip=False,False,False,False
if sample in list(self.er_sample_data.keys()):
if "sample_azimuth" in list(self.er_sample_data[sample].keys()) and self.er_sample_data[sample]['sample_azimuth'] !="":
sample_azimuth=float(self.er_sample_data[sample]['sample_azimuth'])
found_sample_azimuth=True
if "sample_dip" in list(self.er_sample_data[sample].keys()) and self.er_sample_data[sample]['sample_dip']!="":
sample_dip=float(self.er_sample_data[sample]['sample_dip'])
found_sample_dip=True
if "sample_bed_dip_direction" in list(self.er_sample_data[sample].keys()) and self.er_sample_data[sample]['sample_bed_dip_direction']!="":
sample_bed_dip_direction=float(self.er_sample_data[sample]['sample_bed_dip_direction'])
found_sample_bed_dip_direction=True
if "sample_bed_dip" in list(self.er_sample_data[sample].keys()) and self.er_sample_data[sample]['sample_bed_dip']!="":
sample_bed_dip=float(self.er_sample_data[sample]['sample_bed_dip'])
found_sample_bed_dip=True
else:
self.er_sample_data[sample]={}
#--------------------
# deal with sample orientation
#--------------------
found_s,found_geo,found_tilt=False,False,False
if "dec_s" in list(meas_line.keys()) and "inc_s" in list(meas_line.keys()):
found_s=True
MagRec["measurement_dec"]=meas_line["dec_s"]
MagRec["measurement_inc"]=meas_line["inc_s"]
if "dec_g" in list(meas_line.keys()) and "inc_g" in list(meas_line.keys()):
found_geo=True
if "dec_t" in list(meas_line.keys()) and "inc_t" in list(meas_line.keys()):
found_tilt=True
#-----------------------------
# specimen coordinates: no
# geographic coordinates: yes
#-----------------------------
if found_geo and not found_s:
MagRec["measurement_dec"]=meas_line["dec_g"]
MagRec["measurement_inc"]=meas_line["inc_g"]
# core azimuth/plunge is not in er_samples.txt
if not found_sample_dip or not found_sample_azimuth:
self.er_sample_data[sample]['sample_azimuth']="0"
self.er_sample_data[sample]['sample_dip']="0"
# core azimuth/plunge is in er_samples.txt
else:
sample_azimuth=float(self.er_sample_data[sample]['sample_azimuth'])
sample_dip=float(self.er_sample_data[sample]['sample_dip'])
if sample_azimuth!=0 and sample_dip!=0:
print("-W- WARNING: delete core azimuth/plunge in er_samples.txt\n\
                                because dec_s and inc_s are not available")
#-----------------------------
# specimen coordinates: no
# geographic coordinates: no
#-----------------------------
if not found_geo and not found_s:
print("-E- ERROR: sample %s does not have dec_s/inc_s or dec_g/inc_g. Ignore specimen %s "%(sample,specimen))
break
#-----------------------------
# specimen coordinates: yes
# geographic coordinates: yes
#
                    # comment: Ron, this needs to be tested !!
#-----------------------------
if found_geo and found_s:
cdec,cinc=float(meas_line["dec_s"]),float(meas_line["inc_s"])
gdec,ginc=float(meas_line["dec_g"]),float(meas_line["inc_g"])
az,pl=pmag.get_azpl(cdec,cinc,gdec,ginc)
# core azimuth/plunge is not in er_samples.txt:
# calculate core az/pl and add it to er_samples.txt
if not found_sample_dip or not found_sample_azimuth:
self.er_sample_data[sample]['sample_azimuth']="%.1f"%az
self.er_sample_data[sample]['sample_dip']="%.1f"%pl
# core azimuth/plunge is in er_samples.txt
else:
if float(self.er_sample_data[sample]['sample_azimuth'])!= az:
print("-E- ERROR in sample_azimuth sample %s. Check it! using the value in er_samples.txt"%sample)
if float(self.er_sample_data[sample]['sample_dip'])!= pl:
print("-E- ERROR in sample_dip sample %s. Check it! using the value in er_samples.txt"%sample)
#-----------------------------
# specimen coordinates: yes
# geographic coordinates: no
#-----------------------------
                    if found_s and not found_geo:
if found_sample_dip and found_sample_azimuth:
pass
# (nothing to do)
else:
print("-E- ERROR: missing sample_dip or sample_azimuth for sample %s.ignoring specimens "%sample)
break
#-----------------------------
# tilt-corrected coordinates: yes
# geographic coordinates: no
#-----------------------------
if found_tilt and not found_geo:
print("-E- ERROR: missing geographic data for sample %s. Ignoring tilt-corrected data "%sample)
if found_tilt and found_geo:
dec_geo,inc_geo=float(meas_line["dec_g"]),float(meas_line["inc_g"])
dec_tilt,inc_tilt=float(meas_line["dec_t"]),float(meas_line["inc_t"])
if dec_geo==dec_tilt and inc_geo==inc_tilt:
DipDir,Dip=0.,0.
else:
DipDir,Dip=pmag.get_tilt(dec_geo,inc_geo,dec_tilt,inc_tilt)
if not found_sample_bed_dip_direction or not found_sample_bed_dip:
print("-I- calculating dip and dip direction used for tilt correction sample %s. results are put in er_samples.txt"%sample)
self.er_sample_data[sample]['sample_bed_dip_direction']="%.1f"%DipDir
self.er_sample_data[sample]['sample_bed_dip']="%.1f"%Dip
#-----------------------------
# er_samples method codes
# geographic coordinates: no
#-----------------------------
if found_tilt or found_geo:
self.er_sample_data[sample]['magic_method_codes']="SO-NO"
#-----
# Lab treatments and MagIC methods
#-----
if meas_line['treatment']=="N":
LT="LT-NO"
LP=""
MagRec["treatment_temp"]="273."
#MagRec["treatment_temp"]
elif meas_line['treatment']=="A":
LT="LT-AF-Z"
LP="LP-DIR-AF"
MagRec["treatment_ac_field"]="%.4f"%(float(meas_line['step'])*1e-3)
MagRec["treatment_temp"]="273."
#print MagRec["treatment_ac_field"],"treatment_ac_field"
elif meas_line['treatment']=="T":
LT="LT-T-Z"
LP="LP-DIR-T"
MagRec["treatment_temp"]="%.1f"%(float(meas_line['step'])+273.)
#print MagRec["treatment_temp"],"treatment_temp"
#if LT not in this_specimen_LT:
# this_specimen_LT.append(LT)
if LP!="" and LP not in this_specimen_LP:
this_specimen_LP.append(LP)
#MagRec["magic_experiment_name"]=MagRec["er_specimen_name"]+":"+":".join(this_specimen_LP)
MagRec["magic_method_codes"]=LT#+":"+":".join(this_specimen_LP)
MagRecs_this_specimen.append(MagRec)
#-----------------
# er_samples_data
#
if sample in list(self.er_sample_data.keys()):
self.er_sample_data[sample]['er_sample_name']=sample
self.er_sample_data[sample]['er_site_name']=MagRec["er_site_name"]
self.er_sample_data[sample]['er_location_name']=MagRec["er_location_name"]
measurement_running_number+=1
# add magic_experiment_name and magic_method_codes to magic_measurements.txt
for MagRec in MagRecs_this_specimen:
MagRec["magic_experiment_name"]=MagRec["er_specimen_name"]+":"+":".join(this_specimen_LP)
MagRec["magic_method_codes"]=MagRec["magic_method_codes"]+":"+":".join(this_specimen_LP)
MagRecs.append(MagRec)
#--
# write magic_measurements.txt
#--
MagRecs_fixed=self.merge_pmag_recs(MagRecs)
pmag.magic_write(os.path.join(self.WD, "magic_measurements.txt"), MagRecs_fixed, 'magic_measurements')
#--
# write er_samples.txt
#--
ErSamplesRecs=[]
samples=list(self.er_sample_data.keys())
for sample in samples:
ErSamplesRecs.append(self.er_sample_data[sample])
ErSamplesRecs_fixed=self.merge_pmag_recs(ErSamplesRecs)
pmag.magic_write(os.path.join(self.WD, "er_samples.txt"), ErSamplesRecs_fixed, 'er_samples')
MSG=" Files converted to MagIC format and merged into two files:\n\
magic_measurements.txt and er_samples.txt.\n\
Files saved in the current MagIC directory.\n\
Quit the GUI and restart it to view the data."
dlg1 = wx.MessageDialog(None,caption="Message:", message=MSG ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy()
self.END=True
self.Destroy()
def merge_pmag_recs(self,old_recs):
# fix the headers of pmag recs
recs={}
recs=copy.deepcopy(old_recs)
headers=[]
for rec in recs:
for key in list(rec.keys()):
if key not in headers:
headers.append(key)
for rec in recs:
for header in headers:
if header not in list(rec.keys()):
rec[header]=""
return recs
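    # Illustrative example (made-up records): merge_pmag_recs() pads every
    # record with the union of all keys so pmag.magic_write() receives a
    # rectangular table, e.g.
    #
    #     merge_pmag_recs([{'er_specimen_name': 's1', 'measurement_dec': '10.0'},
    #                      {'er_specimen_name': 's2'}])
    #
    # returns the same two records, with the second gaining 'measurement_dec': ''.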
def on_cancelButton(self,event):
self.Destroy()
def get_sample_name(self,specimen,sample_naming_convenstion):
if sample_naming_convenstion[0]=="sample=specimen":
sample=specimen
elif sample_naming_convenstion[0]=="no. of terminate characters":
n=int(sample_naming_convenstion[1])*-1
sample=specimen[:n]
elif sample_naming_convenstion[0]=="charceter delimited":
d=sample_naming_convenstion[1]
sample_splitted=specimen.split(d)
if len(sample_splitted)==1:
sample=sample_splitted[0]
else:
sample=d.join(sample_splitted[:-1])
return sample
def get_site_name(self,sample,site_naming_convenstion):
if site_naming_convenstion[0]=="site=sample":
site=sample
elif site_naming_convenstion[0]=="no. of terminate characters":
n=int(site_naming_convenstion[1])*-1
site=sample[:n]
elif site_naming_convenstion[0]=="charceter delimited":
d=site_naming_convenstion[1]
site_splitted=sample.split(d)
if len(site_splitted)==1:
site=site_splitted[0]
else:
site=d.join(site_splitted[:-1])
#print "d",d
#print "sample",sample
#print "site_splitted",site_splitted
#print "site",site
return site
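    # Worked examples of the naming conventions handled above (made-up names):
    #   'no. of terminate characters' with 1 : specimen 'abc123a'   -> sample 'abc123'
    #   'charceter delimited' with '-'       : specimen 'abc-123-a' -> sample 'abc-123'
    #   'site=sample'                        : the site name is simply the sample name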
def read_magic_file(self,path,sort_by_this_name):
DATA={}
fin=open(path,'r')
fin.readline()
line=fin.readline()
header=line.strip('\n').split('\t')
for line in fin.readlines():
tmp_data={}
tmp_line=line.strip('\n').split('\t')
for i in range(len(tmp_line)):
tmp_data[header[i]]=tmp_line[i]
if tmp_data[sort_by_this_name] in list(DATA.keys()):
print("-E- ERROR: magic file %s has more than one line for %s %s\n"%(path,sort_by_this_name,tmp_data[sort_by_this_name]))
DATA[tmp_data[sort_by_this_name]]=tmp_data
fin.close()
return(DATA)
#--------------------------------------------------------------
# Popupmenu
#--------------------------------------------------------------
class GBPopupMenu(wx.Menu):
def __init__(self,Data,magic_file,mag_meas_data,s,g_index,position):
self.g_index=g_index
self.s=s
self.Data=Data
self.mag_meas_data=mag_meas_data
self.magic_file=magic_file
#self.measurement_flag=measurement_flag
wx.Menu.__init__(self)
item = wx.MenuItem(self, wx.NewId(), "'good measurement'")
self.AppendItem(item)
self.Bind(wx.EVT_MENU, self.OnItemGood, item)
item = wx.MenuItem(self, wx.NewId(),"'bad measurement'")
self.AppendItem(item)
self.Bind(wx.EVT_MENU, self.OnItemBad, item)
def OnItemGood(self, event):
#print "good"
index=self.Data[self.s]['mag_meas_data_index'][self.g_index]
#print self.mag_meas_data[index]
self.mag_meas_data[index]['measurement_flag']='g'
self.write_good_bad_magic_measurements()
def OnItemBad(self, event):
#print "bad"
index=self.Data[self.s]['mag_meas_data_index'][self.g_index]
#print self.mag_meas_data[index]
self.mag_meas_data[index]['measurement_flag']='b'
self.write_good_bad_magic_measurements()
def write_good_bad_magic_measurements(self):
#print "write_good_bad_magic_measurements"
print("self.magic_file",self.magic_file)
pmag.magic_write(self.magic_file,self.mag_meas_data,"magic_measurements")
#--------------------------------------------------------------
# Change Acceptance criteria dialog
#--------------------------------------------------------------
class demag_criteria_dialog(wx.Dialog):
def __init__(self, parent, acceptance_criteria,title):
style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
super(demag_criteria_dialog, self).__init__(parent, title=title,style=style)
self.acceptance_criteria=acceptance_criteria
self.InitUI(acceptance_criteria)
#self.SetSize((250, 200))
def InitUI(self,acceptance_criteria):
pnl1 = wx.Panel(self)
#-----------
# specimen criteria
#-----------
vbox = wx.BoxSizer(wx.VERTICAL)
bSizer1 = wx.StaticBoxSizer( wx.StaticBox( pnl1, wx.ID_ANY, "specimen acceptance criteria" ), wx.HORIZONTAL )
# Specimen criteria
window_list_specimens=['specimen_n','specimen_mad','specimen_dang','specimen_alpha95']
for key in window_list_specimens:
command="self.set_%s=wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))"%key
exec(command)
criteria_specimen_window = wx.GridSizer(2, len(window_list_specimens), 10, 10)
criteria_specimen_window.AddMany( [(wx.StaticText(pnl1,label="n",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="MAD",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="DANG",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="alpha95",style=wx.TE_CENTER), wx.EXPAND),
(self.set_specimen_n),
(self.set_specimen_mad),
(self.set_specimen_dang),
(self.set_specimen_alpha95)])
bSizer1.Add( criteria_specimen_window, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
#-----------
# sample criteria
#-----------
bSizer2 = wx.StaticBoxSizer( wx.StaticBox( pnl1, wx.ID_ANY, "sample acceptance criteria" ), wx.HORIZONTAL )
#self.set_average_by_sample_or_site=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'sample', choices=['sample','site'], style=wx.CB_READONLY)
# Sample criteria
window_list_samples=['sample_n','sample_n_lines','sample_n_planes','sample_k','sample_r','sample_alpha95']
for key in window_list_samples:
command="self.set_%s=wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))"%key
exec(command)
criteria_sample_window = wx.GridSizer(2, len(window_list_samples), 10, 10)
criteria_sample_window.AddMany( [(wx.StaticText(pnl1,label="n",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="n lines",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="n planes",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="k",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="r",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="alpha95",style=wx.TE_CENTER), wx.EXPAND),
(self.set_sample_n),
(self.set_sample_n_lines),
(self.set_sample_n_planes),
(self.set_sample_k),
(self.set_sample_r),
(self.set_sample_alpha95)])
bSizer2.Add( criteria_sample_window, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
#-----------
# site criteria
#-----------
bSizer3 = wx.StaticBoxSizer( wx.StaticBox( pnl1, wx.ID_ANY, "site acceptance criteria" ), wx.HORIZONTAL )
# Site criteria
window_list_sites=['site_n','site_n_lines','site_n_planes','site_k','site_r','site_alpha95']
for key in window_list_sites:
command="self.set_%s=wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))"%key
exec(command)
criteria_site_window = wx.GridSizer(2, len(window_list_sites), 10, 10)
criteria_site_window.AddMany( [(wx.StaticText(pnl1,label="n",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="n lines",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="n planes",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="k",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="r",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="alpha95",style=wx.TE_CENTER), wx.EXPAND),
(self.set_site_n),
(self.set_site_n_lines),
(self.set_site_n_planes),
(self.set_site_k),
(self.set_site_r),
(self.set_site_alpha95)])
bSizer3.Add( criteria_site_window, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
#-----------
#ok_sizer=self.CreateButtonSizer(wx.OK|wx.CANCEL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(pnl1, wx.ID_OK, "&OK")
self.cancelButton = wx.Button(pnl1, wx.ID_CANCEL, '&Cancel')
hbox3.Add(self.okButton)
hbox3.AddSpacer(10)
hbox3.Add(self.cancelButton )
#self.okButton.Bind(wx.EVT_BUTTON, self.OnOK)
#-----------
supported_crit=window_list_specimens+window_list_samples+window_list_sites
# initialize value:
for crit in supported_crit:
if crit not in list(acceptance_criteria.keys()):
continue
if acceptance_criteria[crit]['value']!="":
value=float(acceptance_criteria[crit]['value'])
if value!=-999:
decimal_points=acceptance_criteria[crit]['decimal_points']
command="self.set_%s.SetValue('%%.%if'%%(value))"%(crit,int(decimal_points))
exec(command)
#----------------------
vbox.AddSpacer(10)
vbox.Add(bSizer1, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(bSizer2, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(bSizer3, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(hbox3, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
hbox_top=wx.BoxSizer(wx.HORIZONTAL)
hbox_top.AddSpacer(50)
hbox_top.Add(vbox)
hbox_top.AddSpacer(50)
pnl1.SetSizer(hbox_top)
hbox_top.Fit(self)
#class MyFrame(wx.Frame):
# def __init__(self, parent, id, title):
# wx.Frame.__init__(self, parent, id, title, size=(500,500))
#
# panel = wx.Panel(self, -1)
# wx.Button(panel, 1, 'Show Custom Dialog', (100,100))
# self.Bind (wx.EVT_BUTTON, self.OnShowCustomDialog, id=1)
#
# def OnShowCustomDialog(self, event):
# #dia = MyDialog(self, -1, 'buttons')
#
# dia=demag_criteria_dialog(None, {},title='Set Acceptance Criteria')
# dia.Center()
# dia.ShowModal()
# dia.Destroy()
#
#class MyApp(wx.App):
# def OnInit(self):
# frame = MyFrame(None, -1, 'customdialog1.py')
# frame.Show(True)
# frame.Centre()
# return True
##
#app = MyApp(0)
#app.MainLoop()
#if __name__ == '__main__':
# app = wx.App()
# app.frame = demag_criteria_dialog(None, {},title='Set Acceptance Criteria')
# app.frame.Show()
# app.frame.Center()
# app.MainLoop()
#if __name__ == '__main__':
# app = wx.App()
# app.frame = magic_pmag_tables_dialog(None,"./",{},{})
# app.frame.Center()
# #alignToTop(app.frame)
# #dw, dh = wx.DisplaySize()
# #w, h = app.frame.GetSize()
# #print 'display 2', dw, dh
# #print "gui 2", w, h
# app.frame.Show()
# app.MainLoop()
| {
"content_hash": "e19d245661b64dda6391e87e0df7bcb0",
"timestamp": "",
"source": "github",
"line_count": 1600,
"max_line_length": 247,
"avg_line_length": 44.53375,
"alnum_prop": 0.5395205883178488,
"repo_name": "Caoimhinmg/PmagPy",
"id": "9feb77509e0e74b10ddff24afb1f7eceddcc30bf",
"size": "71277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dialogs/demag_dialogs.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "D",
"bytes": "5748"
},
{
"name": "HTML",
"bytes": "63859"
},
{
"name": "Inno Setup",
"bytes": "3675"
},
{
"name": "Jupyter Notebook",
"bytes": "14175459"
},
{
"name": "Python",
"bytes": "14896053"
},
{
"name": "Shell",
"bytes": "6986"
},
{
"name": "TeX",
"bytes": "3146"
}
],
"symlink_target": ""
} |
import os
import select
import threading
class SelectReactor(object):
TIMEOUT = 0.5 if os.name == "nt" else None
def __init__(self):
self._active = False
self._readfds = set()
def register_read(self, fileobj):
        self._readfds.add(fileobj)
def run(self):
self._active = True
while self._active:
rlist, _, _ = select.select(self._readfds, (), (), self.TIMEOUT)
for fileobj in rlist:
data = fileobj.recv(16000)
if not data:
fileobj.close()
self._readfds.discard(fileobj)
_reactor = SelectReactor()
def _reactor_thread():
    # run the shared reactor's select loop in the background thread
    _reactor.run()
_thd = None
def start_reactor():
global _thd
    if _thd is not None:
        raise ValueError("already started")
    _thd = threading.Thread(name="rpyc reactor thread", target=_reactor_thread)
_thd.setDaemon(True)
_thd.start()
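# Hedged usage sketch (not part of the original module); the socketpair below
# is only an illustration, real rpyc code registers its own stream objects:
#
#   import socket
#   a, b = socket.socketpair()     # a toy connected pair
#   _reactor.register_read(a)      # the reactor will recv() from `a`
#   start_reactor()                # daemon thread running _reactor.run()
#   b.send(b"ping")                # drained by the select loop above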
| {
"content_hash": "bd50f7e90c5a0a75e43d752640452dd2",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 76,
"avg_line_length": 23.743589743589745,
"alnum_prop": 0.5680345572354212,
"repo_name": "pyq881120/rpyc",
"id": "1e0046b12ace20498cbd28c9a796c887397e9b52",
"size": "926",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "rpyc/core/reactor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "270849"
}
],
"symlink_target": ""
} |
"""
Provides a full, all-inclusive CRUD template. A very basic example is as follows.
class Widget(BaseModel):
name = ndb.StringProperty()
def __unicode__(self):
return self.name or '<Untitled Widget>'
class WidgetBlueprint(GenericCrud):
model = Widget
blueprint = WidgetBlueprint('widgets', 'widgets')
app.register_blueprint(blueprint, url_prefix='/widgets')
It's that easy.
"""
from flask import Blueprint
from GenericViews.GenericRetrieve import GenericRetrieve
from GenericViews.GenericList import GenericList
from GenericViews.GenericEditNew import GenericEditNew
from GenericViews.GenericEditExisting import GenericEditExisting
from GenericViews.GenericDelete import GenericDelete
class GenericCrud(Blueprint):
model = None
base_template = "base.html"
variable_current_object = 'current_object'
variable_rows = 'rows'
variable_next_cursor = 'next_cursor'
variable_last_cursor = 'last_cursor'
variable_form = 'form'
name_singular = None
name_plural = None
form_exclude = ['class'] # Exclude these when editing/viewing fields.
form_include = None # IF specified, only show these fields
list_fields = None # Include these when listing entities.
wtforms_field_args = None # Field args to pass to wtform_appengine model_form
page_size = 25
render_as = 'table'
not_found_template = '404.html'
permission_denied_template = '403.html'
sleep_on_not_found = .25 # To slow down brute-force URL guessing schemes, sleep this many seconds each time a 404 is generated.
extra_context = {} # Extra variables passed to complates
decorators = []
enable_list = True
enable_retrieve = True
enable_delete = True
enable_edit = True
enable_new = True
list_template = 'generic-list.html'
retrieve_template = 'generic-view.html'
delete_template = 'generic-delete.html'
edit_template = 'generic-editor.html'
new_template = 'generic-editor.html'
def __init__(self, name, import_name, static_folder=None, static_url_path=None, template_folder='GenericViews/templates', url_prefix=None, subdomain=None,
url_defaults=None):
super(GenericCrud, self).__init__(name, import_name, static_folder, static_url_path, template_folder, url_prefix, subdomain, url_defaults)
view_names = {}
for view_type in ('list', 'retrieve', 'delete', 'edit', 'new'):
if getattr(self, 'enable_%s' % view_type):
view_names[view_type] = '%s.%s' % (name, view_type)
if view_type == 'list':
view_names['list_cursor'] = '%s.list_cursor' % name
else:
view_names[view_type] = None
if view_type == 'list':
view_names['list_cursor'] = None
class GenericConfig(object):
model = self.model
base_template = self.base_template
variable_current_object = self.variable_current_object
variable_rows = self.variable_rows
variable_next_cursor = self.variable_next_cursor
variable_last_cursor = self.variable_last_cursor
variable_form = self.variable_form
name_singular = self.name_singular
name_plural = self.name_plural
form_exclude = self.form_exclude
form_include = self.form_include
list_fields = self.list_fields
wtforms_field_args = self.wtforms_field_args
page_size = self.page_size
not_found_template = self.not_found_template
permission_denied_template = self.permission_denied_template
sleep_on_not_found = self.sleep_on_not_found
extra_context = self.extra_context
decorators = self.decorators
retrieve_view = view_names['retrieve']
new_view = view_names['new']
list_view = view_names['list']
list_view_cursor = view_names['list_cursor']
delete_view = view_names['delete']
edit_view = view_names['edit']
if self.enable_list:
class List(GenericConfig, GenericList):
pass
List.template = self.list_template
self.add_url_rule('/', view_func=List.as_view('list'))
if self.enable_retrieve:
class Retrieve(GenericConfig, GenericRetrieve):
pass
Retrieve.template = self.retrieve_template
self.add_url_rule('/<urlsafe>/', view_func=Retrieve.as_view('retrieve'))
if self.enable_new:
class New(GenericConfig, GenericEditNew):
pass
New.template = self.new_template
self.add_url_rule('/new/', view_func=New.as_view('new'))
if self.enable_edit:
class Edit(GenericConfig, GenericEditExisting):
pass
Edit.template = self.edit_template
self.add_url_rule('/<urlsafe>/edit/', view_func=Edit.as_view('edit'))
if self.enable_delete:
class Delete(GenericConfig, GenericDelete):
pass
Delete.template = self.delete_template
self.add_url_rule('/<urlsafe>/delete/', view_func=Delete.as_view('delete'))
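# Hedged usage sketch (assumes the `Widget` ndb model and Flask `app` from the
# module docstring); it shows the kind of per-blueprint customisation the
# class attributes above are meant for:
#
#   class WidgetCrud(GenericCrud):
#       model = Widget
#       name_singular = 'widget'
#       name_plural = 'widgets'
#       list_fields = ['name']       # columns shown by the list view
#       enable_delete = False        # drop the delete route entirely
#
#   app.register_blueprint(WidgetCrud('widgets', __name__), url_prefix='/widgets')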
| {
"content_hash": "b321c1a91be697a5df454af59d1bd84b",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 158,
"avg_line_length": 33.98726114649681,
"alnum_prop": 0.6178785607196402,
"repo_name": "kkinder/GAEStarterKit",
"id": "8dcc41435f40e0636a11954ba602b33b315b0119",
"size": "5336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GenericViews/GenericCrud.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1156254"
},
{
"name": "CoffeeScript",
"bytes": "9178"
},
{
"name": "HTML",
"bytes": "64367"
},
{
"name": "JavaScript",
"bytes": "2042044"
},
{
"name": "Python",
"bytes": "112984"
}
],
"symlink_target": ""
} |
from picamera import PiCamera
import subprocess as sb
import os
def capture_img(img_path='img.jpg', res=(1024,768), vflip=True):
"""
    Captures an image with PiCamera and saves it to the given path.
    It cannot be used while the camera is active (for example, when it
    is in use by mjpg-streamer); in that case a ``PiCameraMMALError``
    exception will be raised.
"""
camera = PiCamera()
camera.resolution = res
camera.vflip = vflip
print('CAMERA: Capturing image...')
camera.capture(img_path)
print('CAMERA: Image saved in {}'.format(img_path))
camera.close()
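# Hedged usage sketch (paths are assumptions, not part of the original module):
# capture_img cannot run while MjpgStreamer (below) holds the camera, so a
# still capture is typically bracketed by stopping and restarting the stream:
#
#   streamer = MjpgStreamer('/home/pi/mjpg-streamer')
#   streamer.start_stream()
#   ...
#   streamer.stop_stream()
#   capture_img('/tmp/still.jpg', res=(1024, 768))
#   streamer.start_stream()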
class MjpgStreamer:
"""
**Interface for controlling mjpg-streamer process**
"""
def __init__(self, path, resolution=(640, 480), fps=25, vflip=True):
"""
``path`` -- path to mjpg-streamer location.
For example '/home/pi/mjpg-streamer'.
"""
self.resolution = resolution
self.fps = fps
self.path = path
self.stream_process = None
self.vflip = vflip
def start_stream(self):
"""
Starts streaming process. Stream is served via
web server (port 8080).
If there was previous streaming process
created with this method, it will be terminated.
"""
if self.stream_process is not None:
print("STREAMER: Killing previous stream (PID: {})".format(self.stream_process.pid))
self.stream_process.kill()
input_str = 'input_raspicam.so -x {} -y {} -fps {}'.format(self.resolution[0],
self.resolution[1],
self.fps)
        if self.vflip:
input_str += ' -vf'
output_str = 'output_http.so -w {}/www'.format(self.path)
plugin_env = os.environ.copy()
plugin_env['LD_LIBRARY_PATH'] = self.path
print('STREAMER: Starting stream...')
self.stream_process = sb.Popen([self.path + '/mjpg_streamer',
'-o', output_str,
'-i', input_str], env=plugin_env)
print('STREAMER: Process running with PID {}'.format(self.stream_process.pid))
def stop_stream(self):
"""
Kills created streaming process.
Does nothing when no known stream process exists.
"""
if self.stream_process is None:
print('STREAMER: No streaming process is running with this instance.')
return
print('STREAMER: Terminating mjpg_streamer process')
self.stream_process.kill()
self.stream_process = None | {
"content_hash": "19adba11bd444766a6d248e2ddfb0ea7",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 87,
"avg_line_length": 28.317073170731707,
"alnum_prop": 0.6507321274763135,
"repo_name": "ggljzr/pytRobot",
"id": "34e6b0c74c970f522bd4d289699829a9d3e6b8ed",
"size": "2322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytrobot/camera.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1010"
},
{
"name": "HTML",
"bytes": "1674"
},
{
"name": "Python",
"bytes": "16766"
}
],
"symlink_target": ""
} |
"""
SharedConnection
"""
import inspect
import scipy
import numpy as np
from functions import extract, convolve1d, convolve2d, best_fft_shape
from connection import Connection, ConnectionError
from numpy.fft import fft, ifft
from numpy.fft import fft2, ifft2
from numpy.fft import rfft, irfft
from numpy.fft import rfft2, irfft2
from numpy.fft import fftshift, ifftshift
#from scipy.fftpack import fft, ifft, fft2, ifft2
#from numpy import fftshift, ifftshift
#from scipy.fftpack import rfft, irfft, rfft2, irfft2
class SharedConnection(Connection):
""" """
def __init__(self, source=None, target=None, weights=None, toric=False, fft=True):
""" """
Connection.__init__(self, source, target, toric)
self._src_rows = None
self._src_cols = None
self._fft = fft
self.setup_weights(weights)
self.setup_equation(None)
def setup_weights(self, weights):
""" Setup weights """
# If we have a toric connection, kernel cannot be greater than source
# in any dimension
if self._toric:
s = np.array(self.source.shape)
w = np.array(weights.shape)
weights = extract(weights, np.minimum(s,w), w//2)
# 1d convolution case
# -------------------
if len(self.source.shape) == len(self.target.shape) == 1:
if len(weights.shape) != 1:
raise ConnectionError, \
'''Shared connection requested but weights matrix shape does not match.'''
if self.source.shape != self.target.shape:
rows = np.rint((np.linspace(0,1,self.target.shape[0])
*(self.source.shape[0]-1))).astype(int)
self._src_rows = rows
if self._fft:
src_shape = np.array(self.source.shape)
wgt_shape = np.array(weights.shape)
K = np.nan_to_num(weights)[::-1]
if self._toric:
K_ = extract(K, src_shape, wgt_shape//2)
self._fft_weights = rfft(ifftshift(K_))
else:
size = src_shape+wgt_shape//2
shape = best_fft_shape(size)
self._fft_weights = rfft(K,shape[0])
i0 = wgt_shape[0]//2
i1 = i0+src_shape[0]
self._fft_indices = slice(i0,i1)
self._fft_shape = shape
# m = self.source.shape[0]
# p = weights.shape[0]
# if self._toric:
# _weights = extract(weights[::-1], (m,), (np.floor(p/2.0),) )
# else:
# self._src_holder = np.zeros(2*m+1)
# _weights = extract(weights[::-1], (2*m+1,), (np.floor(p/2.0),) )
# self._fft_weights = fft(ifftshift(np.nan_to_num(_weights)))
self._mask = np.ones(weights.shape)
self._mask[np.isnan(weights).nonzero()] = 0
self._weights = np.nan_to_num(weights)
# 2d convolution case
# -------------------
elif len(self.source.shape) == len(self.target.shape) == 2:
if len(weights.shape) != 2:
raise ConnectionError, \
'''Shared connection requested but weights matrix shape does not match.'''
if self.source.shape != self.target.shape:
rows = np.rint((np.linspace(0,1,self.target.shape[0])
*(self.source.shape[0]-1))).astype(int)
cols = np.rint((np.linspace(0,1,self.target.shape[1])
*(self.source.shape[1]-1))).astype(int)
self._src_rows = rows.reshape((len(rows),1))
self._src_cols = cols.reshape((1,len(cols)))
if self._fft:
src_shape = np.array(self.source.shape)
wgt_shape = np.array(weights.shape)
K = np.nan_to_num(weights)[::-1,::-1]
if self._toric:
K_ = extract(K, src_shape, wgt_shape//2)
self._fft_weights = rfft2(ifftshift(K_))
else:
size = src_shape+wgt_shape//2
shape = best_fft_shape(size)
self._fft_weights = rfft2(K,shape)
i0 = wgt_shape[0]//2
i1 = i0+src_shape[0]
j0 = wgt_shape[1]//2
j1 = j0+src_shape[1]
self._fft_indices = slice(i0,i1),slice(j0,j1)
self._fft_shape = shape
self._mask = np.ones(weights.shape)
self._mask[np.isnan(weights).nonzero()] = 0
self._weights = np.nan_to_num(weights)
dtype = weights.dtype
self._USV = scipy.linalg.svd(np.nan_to_num(weights))
U,S,V = self._USV
self._USV = U.astype(dtype), S.astype(dtype), V.astype(dtype)
# Higher dimensional case
# ------------------------
else:
raise ConnectionError, \
'''Shared connection requested but dimensions are too high (> 2).'''
def output(self):
""" """
# One dimension
if len(self._source.shape) == 1:
source = self._actual_source
# Use FFT convolution
if self._fft:
if not self._toric:
P = rfft(source,self._fft_shape[0])*self._fft_weights
R = irfft(P, self._fft_shape[0]).real
R = R[self._fft_indices]
else:
P = rfft(source)*self._fft_weights
R = irfft(P,source.shape[0]).real
# if self._toric:
# R = ifft(fft(source)*self._fft_weights).real
# else:
# n = source.shape[0]
# self._src_holder[n//2:n//2+n] = source
# R = ifft(fft(self._src_holder)*self._fft_weights)
# R = R.real[n//2:n//2+n]
# Use regular convolution
else:
R = convolve1d(source, self._weights[::-1], self._toric)
if self._src_rows is not None:
R = R[self._src_rows]
return R.reshape(self._target.shape)
# Two dimensions
else:
source = self._actual_source
# Use FFT convolution
if self._fft:
if not self._toric:
P = rfft2(source,self._fft_shape)*self._fft_weights
R = irfft2(P, self._fft_shape).real
R = R[self._fft_indices]
else:
P = rfft2(source)*self._fft_weights
R = irfft2(P,source.shape).real
# Use SVD convolution
else:
R = convolve2d(source, self._weights, self._USV, self._toric)
if self._src_rows is not None and self._src_cols is not None:
R = R[self._src_rows, self._src_cols]
return R.reshape(self._target.shape)
def __getitem__(self, key):
""" """
src = self.source
dst = self.target
kernel = self._weights
src_shape = np.array(src.shape, dtype=float)
dst_shape = np.array(dst.shape, dtype=float)
kernel_shape = np.array(kernel.shape, dtype=float)
dst_key = np.array(key, dtype=float)
src_key = np.rint((dst_key/(dst_shape-1))*(src_shape-1)).astype(int)
scale = 1 #dst_shape/src_shape
Z = np.zeros(src.shape) * np.NaN
for i in range(kernel.size):
k_key = np.array(np.unravel_index(i, kernel.shape))
if self._toric:
key = (src_key + scale*k_key - kernel_shape//2).astype(int) % src_shape
else:
key = (src_key + scale*k_key - kernel_shape//2).astype(int)
bad = False
for k in range(len(key)):
if key[k] < 0 or key[k] >= Z.shape[k]: bad = True
if not bad:
if self._mask[tuple(k_key.tolist())]:
Z[tuple(key.tolist())] = kernel[tuple(k_key.tolist())]
return Z
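# Hedged sketch (not part of the original module) of the identity the FFT
# branch of `output` relies on: for a "toric" connection, multiplying rfft
# spectra and inverting gives the circular convolution of source and kernel.
# Sizes and values below are arbitrary assumptions; the ifftshift/reversal in
# setup_weights only re-centres the kernel before this step.
#
#   import numpy as np
#   N = 8
#   src = np.random.random(N)
#   ker = np.zeros(N); ker[:3] = [0.25, 0.5, 0.25]      # kernel padded to N
#   via_fft = np.fft.irfft(np.fft.rfft(src) * np.fft.rfft(ker), N)
#   direct = np.array([sum(src[j] * ker[(i - j) % N] for j in range(N))
#                      for i in range(N)])
#   assert np.allclose(via_fft, direct)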
| {
"content_hash": "d9a686886fde51c51a0fab8fca09f8f8",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 94,
"avg_line_length": 39.22380952380952,
"alnum_prop": 0.4911982517907005,
"repo_name": "rougier/dana",
"id": "0e9d75e3fc3a27a6522fd75f11f247fe0b837eb4",
"size": "10081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dana/shared_connection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9273"
},
{
"name": "Makefile",
"bytes": "859"
},
{
"name": "Python",
"bytes": "414214"
},
{
"name": "Smarty",
"bytes": "158"
}
],
"symlink_target": ""
} |
import order
def user_order_by(self, field):
"""
Queryset method ordering objects by user ordering field.
"""
# Get ordering model.
model_label = order.utils.resolve_labels('.'.join(\
[self.model._meta.app_label, self.model._meta.object_name]))
orderitem_set = getattr(self.model, \
order.utils.resolve_order_item_related_set_name(model_label))
order_model = orderitem_set.related.model
# Resolve ordering model table name.
db_table = order_model._meta.db_table
# Add ordering field as extra queryset fields.
pk_name = self.model._meta.pk.attname
# If we have a descending query remove '-' from field name when quering.
sanitized_field = field.lstrip('-')
extra_select = {
sanitized_field: '(SELECT %s from %s WHERE item_id=%s.%s)' % \
(sanitized_field, db_table, self.model._meta.db_table, pk_name)
}
# Use original field name when ordering to allow for descending.
return self.extra(select=extra_select).all().order_by(field)
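# Hedged usage sketch (model and field names are assumptions): given an
# orderable `Photo` model whose order items expose a `position` column,
#
#   Photo.objects.all().user_order_by('position')    # ascending
#   Photo.objects.all().user_order_by('-position')   # descending
#
# pulls the ordering column in as an extra SELECT subquery and orders the
# queryset on it.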
| {
"content_hash": "50b0ca7371e819783b0e12cd482261dd",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 79,
"avg_line_length": 35.03333333333333,
"alnum_prop": 0.6546146527117032,
"repo_name": "praekelt/django-order",
"id": "faecc2daacad296ee78ebffaa2108be5fe98dfe7",
"size": "1051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "order/managers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12250"
}
],
"symlink_target": ""
} |
"""
Example Airflow DAG that creates, patches and deletes a Cloud SQL instance, and also
creates, patches and deletes a database inside the instance, in Google Cloud.
This DAG relies on the following OS environment variables
https://airflow.apache.org/concepts.html#variables
* GCP_PROJECT_ID - Google Cloud project for the Cloud SQL instance.
* INSTANCE_NAME - Name of the Cloud SQL instance.
* DB_NAME - Name of the database inside a Cloud SQL instance.
"""
from __future__ import annotations
import os
from datetime import datetime
from urllib.parse import urlsplit
from airflow import models
from airflow.models.xcom_arg import XComArg
from airflow.providers.google.cloud.operators.cloud_sql import (
CloudSQLCreateInstanceDatabaseOperator,
CloudSQLCreateInstanceOperator,
CloudSQLDeleteInstanceDatabaseOperator,
CloudSQLDeleteInstanceOperator,
CloudSQLExportInstanceOperator,
CloudSQLImportInstanceOperator,
CloudSQLInstancePatchOperator,
CloudSQLPatchInstanceDatabaseOperator,
)
from airflow.providers.google.cloud.operators.gcs import (
GCSBucketCreateAclEntryOperator,
GCSCreateBucketOperator,
GCSDeleteBucketOperator,
GCSObjectCreateAclEntryOperator,
)
from airflow.utils.trigger_rule import TriggerRule
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
PROJECT_ID = os.environ.get("SYSTEM_TESTS_GCP_PROJECT")
DAG_ID = "cloudsql"
INSTANCE_NAME = f"{DAG_ID}-{ENV_ID}-instance"
DB_NAME = f"{DAG_ID}-{ENV_ID}-db"
BUCKET_NAME = f"{DAG_ID}_{ENV_ID}_bucket"
FILE_NAME = f"{DAG_ID}_{ENV_ID}_exportImportTestFile"
FILE_URI = f"gs://{BUCKET_NAME}/{FILE_NAME}"
FAILOVER_REPLICA_NAME = f"{INSTANCE_NAME}-failover-replica"
READ_REPLICA_NAME = f"{INSTANCE_NAME}-read-replica"
# Bodies below represent Cloud SQL instance resources:
# https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances
# [START howto_operator_cloudsql_create_body]
body = {
"name": INSTANCE_NAME,
"settings": {
"tier": "db-n1-standard-1",
"backupConfiguration": {"binaryLogEnabled": True, "enabled": True, "startTime": "05:00"},
"activationPolicy": "ALWAYS",
"dataDiskSizeGb": 30,
"dataDiskType": "PD_SSD",
"databaseFlags": [],
"ipConfiguration": {
"ipv4Enabled": True,
"requireSsl": True,
},
"locationPreference": {"zone": "europe-west4-a"},
"maintenanceWindow": {"hour": 5, "day": 7, "updateTrack": "canary"},
"pricingPlan": "PER_USE",
"replicationType": "ASYNCHRONOUS",
"storageAutoResize": True,
"storageAutoResizeLimit": 0,
"userLabels": {"my-key": "my-value"},
},
"failoverReplica": {"name": FAILOVER_REPLICA_NAME},
"databaseVersion": "MYSQL_5_7",
"region": "europe-west4",
}
# [END howto_operator_cloudsql_create_body]
# [START howto_operator_cloudsql_create_replica]
read_replica_body = {
"name": READ_REPLICA_NAME,
"settings": {
"tier": "db-n1-standard-1",
},
"databaseVersion": "MYSQL_5_7",
"region": "europe-west4",
"masterInstanceName": INSTANCE_NAME,
}
# [END howto_operator_cloudsql_create_replica]
# [START howto_operator_cloudsql_patch_body]
patch_body = {
"name": INSTANCE_NAME,
"settings": {
"dataDiskSizeGb": 35,
"maintenanceWindow": {"hour": 3, "day": 6, "updateTrack": "canary"},
"userLabels": {"my-key-patch": "my-value-patch"},
},
}
# [END howto_operator_cloudsql_patch_body]
# [START howto_operator_cloudsql_export_body]
export_body = {
"exportContext": {
"fileType": "sql",
"uri": FILE_URI,
"sqlExportOptions": {"schemaOnly": False},
"offload": True,
}
}
# [END howto_operator_cloudsql_export_body]
# [START howto_operator_cloudsql_import_body]
import_body = {"importContext": {"fileType": "sql", "uri": FILE_URI}}
# [END howto_operator_cloudsql_import_body]
# [START howto_operator_cloudsql_db_create_body]
db_create_body = {"instance": INSTANCE_NAME, "name": DB_NAME, "project": PROJECT_ID}
# [END howto_operator_cloudsql_db_create_body]
# [START howto_operator_cloudsql_db_patch_body]
db_patch_body = {"charset": "utf16", "collation": "utf16_general_ci"}
# [END howto_operator_cloudsql_db_patch_body]
with models.DAG(
DAG_ID,
schedule=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=["example", "cloud_sql"],
) as dag:
create_bucket = GCSCreateBucketOperator(task_id="create_bucket", bucket_name=BUCKET_NAME)
# ############################################## #
# ### INSTANCES SET UP ######################### #
# ############################################## #
# [START howto_operator_cloudsql_create]
sql_instance_create_task = CloudSQLCreateInstanceOperator(
body=body, instance=INSTANCE_NAME, task_id="sql_instance_create_task"
)
# [END howto_operator_cloudsql_create]
sql_instance_read_replica_create = CloudSQLCreateInstanceOperator(
body=read_replica_body,
instance=READ_REPLICA_NAME,
task_id="sql_instance_read_replica_create",
)
# ############################################## #
# ### MODIFYING INSTANCE AND ITS DATABASE ###### #
# ############################################## #
# [START howto_operator_cloudsql_patch]
sql_instance_patch_task = CloudSQLInstancePatchOperator(
body=patch_body, instance=INSTANCE_NAME, task_id="sql_instance_patch_task"
)
# [END howto_operator_cloudsql_patch]
# [START howto_operator_cloudsql_db_create]
sql_db_create_task = CloudSQLCreateInstanceDatabaseOperator(
body=db_create_body, instance=INSTANCE_NAME, task_id="sql_db_create_task"
)
# [END howto_operator_cloudsql_db_create]
# [START howto_operator_cloudsql_db_patch]
sql_db_patch_task = CloudSQLPatchInstanceDatabaseOperator(
body=db_patch_body,
instance=INSTANCE_NAME,
database=DB_NAME,
task_id="sql_db_patch_task",
)
# [END howto_operator_cloudsql_db_patch]
# ############################################## #
# ### EXPORTING & IMPORTING SQL ################ #
# ############################################## #
file_url_split = urlsplit(FILE_URI)
# For export & import to work we need to add the Cloud SQL instance's Service Account
# write access to the destination GCS bucket.
service_account_email = XComArg(sql_instance_create_task, key="service_account_email")
# [START howto_operator_cloudsql_export_gcs_permissions]
sql_gcp_add_bucket_permission_task = GCSBucketCreateAclEntryOperator(
entity=f"user-{service_account_email}",
role="WRITER",
bucket=file_url_split[1], # netloc (bucket)
task_id="sql_gcp_add_bucket_permission_task",
)
# [END howto_operator_cloudsql_export_gcs_permissions]
# [START howto_operator_cloudsql_export]
sql_export_task = CloudSQLExportInstanceOperator(
body=export_body, instance=INSTANCE_NAME, task_id="sql_export_task"
)
# [END howto_operator_cloudsql_export]
# For import to work we need to add the Cloud SQL instance's Service Account
# read access to the target GCS object.
# [START howto_operator_cloudsql_import_gcs_permissions]
sql_gcp_add_object_permission_task = GCSObjectCreateAclEntryOperator(
entity=f"user-{service_account_email}",
role="READER",
bucket=file_url_split[1], # netloc (bucket)
object_name=file_url_split[2][1:], # path (strip first '/')
task_id="sql_gcp_add_object_permission_task",
)
# [END howto_operator_cloudsql_import_gcs_permissions]
# [START howto_operator_cloudsql_import]
sql_import_task = CloudSQLImportInstanceOperator(
body=import_body, instance=INSTANCE_NAME, task_id="sql_import_task"
)
# [END howto_operator_cloudsql_import]
# ############################################## #
# ### DELETING A DATABASE FROM AN INSTANCE ##### #
# ############################################## #
# [START howto_operator_cloudsql_db_delete]
sql_db_delete_task = CloudSQLDeleteInstanceDatabaseOperator(
instance=INSTANCE_NAME, database=DB_NAME, task_id="sql_db_delete_task"
)
# [END howto_operator_cloudsql_db_delete]
sql_db_delete_task.trigger_rule = TriggerRule.ALL_DONE
# ############################################## #
# ### INSTANCES TEAR DOWN ###################### #
# ############################################## #
# [START howto_operator_cloudsql_replicas_delete]
sql_instance_failover_replica_delete_task = CloudSQLDeleteInstanceOperator(
instance=FAILOVER_REPLICA_NAME,
task_id="sql_instance_failover_replica_delete_task",
)
sql_instance_read_replica_delete_task = CloudSQLDeleteInstanceOperator(
instance=READ_REPLICA_NAME, task_id="sql_instance_read_replica_delete_task"
)
# [END howto_operator_cloudsql_replicas_delete]
sql_instance_failover_replica_delete_task.trigger_rule = TriggerRule.ALL_DONE
sql_instance_read_replica_delete_task.trigger_rule = TriggerRule.ALL_DONE
# [START howto_operator_cloudsql_delete]
sql_instance_delete_task = CloudSQLDeleteInstanceOperator(
instance=INSTANCE_NAME, task_id="sql_instance_delete_task"
)
# [END howto_operator_cloudsql_delete]
sql_instance_delete_task.trigger_rule = TriggerRule.ALL_DONE
delete_bucket = GCSDeleteBucketOperator(
task_id="delete_bucket", bucket_name=BUCKET_NAME, trigger_rule=TriggerRule.ALL_DONE
)
(
# TEST SETUP
create_bucket
# TEST BODY
>> sql_instance_create_task
>> sql_instance_read_replica_create
>> sql_instance_patch_task
>> sql_db_create_task
>> sql_db_patch_task
>> sql_gcp_add_bucket_permission_task
>> sql_export_task
>> sql_gcp_add_object_permission_task
>> sql_import_task
>> sql_db_delete_task
>> sql_instance_failover_replica_delete_task
>> sql_instance_read_replica_delete_task
>> sql_instance_delete_task
# TEST TEARDOWN
>> delete_bucket
)
# Task dependencies created via `XComArgs`:
# sql_instance_create_task >> sql_gcp_add_bucket_permission_task
# sql_instance_create_task >> sql_gcp_add_object_permission_task
# ### Everything below this line is not part of example ###
# ### Just for system tests purpose ###
from tests.system.utils.watcher import watcher
# This test needs watcher in order to properly mark success/failure
# when "tearDown" task with trigger rule is part of the DAG
list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| {
"content_hash": "c262636dc73f429fa6c42e59db00fe87",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 97,
"avg_line_length": 36.95578231292517,
"alnum_prop": 0.6438104003681546,
"repo_name": "nathanielvarona/airflow",
"id": "2fe8ee8ed6ff1a1898b33fec39b4659f5357d0d3",
"size": "11652",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/system/providers/google/cloud/cloud_sql/example_cloud_sql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
} |
from wtforms import TextField
from flask.ext.wtf import Form
from wtforms.validators import Required, Email
class SignupForm(Form):
email = TextField('Email Address', validators=[Required(), Email()])
| {
"content_hash": "df7fd2b3ed3ca19bab8253e1436f5591",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 72,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.7681159420289855,
"repo_name": "angstwad/linky",
"id": "43df66bd9cc8eaf158863d1195e740cf4c7343c3",
"size": "852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linky/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "68"
},
{
"name": "JavaScript",
"bytes": "327"
},
{
"name": "Python",
"bytes": "9387"
}
],
"symlink_target": ""
} |
class State(object):
"""A state has an operation, and can be moved into the next state given an input
"""
    def run(self, context):
        raise NotImplementedError("Run not implemented")
    def next(self, context):
        raise NotImplementedError("Next not implemented")
class StateMachine(object):
"""Takes a list of inputs to move from state to state using a template method
"""
def __init__(self, initial_state):
self.current_state = initial_state
def run_all(self):
while True:
self.current_state.run(self)
self.current_state = self.current_state.next(self)
if self.current_state is None:
break
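# Hedged sketch (not part of the original module) of a concrete two-state
# machine built on the classes above; the state names are made up:
#
#   class Ping(State):
#       def run(self, context):
#           print("ping")
#       def next(self, context):
#           return Pong()
#
#   class Pong(State):
#       def run(self, context):
#           print("pong")
#       def next(self, context):
#           return None              # returning None stops run_all()
#
#   StateMachine(Ping()).run_all()   # prints "ping", then "pong"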
| {
"content_hash": "d8a6f24fecb5467f3397a18e9571de62",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 84,
"avg_line_length": 30.791666666666668,
"alnum_prop": 0.6332882273342354,
"repo_name": "colorfuldisaster/adolf-scriptler",
"id": "8894f3ed528b8a4d2b2396c693bc6bcfded6e632",
"size": "739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "secrethitler/statemachine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39431"
}
],
"symlink_target": ""
} |
import os
import sys
from configparser import ConfigParser
from datetime import datetime
from irma.common.utils.utils import timestamp
from irma.common.plugins import PluginBase
from irma.common.plugin_result import PluginResult
from irma.common.base.utils import IrmaProbeType
from irma.common.plugins import ModuleDependency, FileDependency
from irma.common.plugins import PluginLoadError
class PEiDPlugin(PluginBase):
class PEiDResult:
ERROR = -1
FOUND = 1
NOT_FOUND = 0
# =================
# plugin metadata
# =================
_plugin_name_ = "PEiD"
_plugin_display_name_ = "PEiD PE Packer Identifier"
_plugin_author_ = "Quarkslab"
_plugin_version_ = "1.0.0"
_plugin_category_ = IrmaProbeType.metadata
_plugin_description_ = "Plugin to run files against PEiD signatures"
_plugin_dependencies_ = [
ModuleDependency(
'pefile',
help='See requirements.txt for needed dependencies'
),
ModuleDependency(
'peutils',
help='See requirements.txt for needed dependencies'
),
FileDependency(
os.path.join(os.path.dirname(__file__), 'config.ini')
)
]
_plugin_mimetype_regexp = 'PE32'
@classmethod
def verify(cls):
# load default configuration file
config = ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
sign_path = config.get('PEiD', 'sign_path')
# check for configured signatures path
if not os.path.exists(sign_path):
raise PluginLoadError("{0}: verify() failed because "
"signatures file not found."
"".format(cls.__name__))
# =============
# constructor
# =============
def __init__(self):
config = ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
sign_path = config.get('PEiD', 'sign_path')
peutils = sys.modules['peutils']
data = open(sign_path, "r", encoding="utf8", errors="ignore").read()
self.signatures = peutils.SignatureDatabase(data=data)
def analyze(self, filename):
pefile = sys.modules['pefile']
try:
pe = pefile.PE(filename)
results = self.signatures.match(pe)
if results is None:
return self.PEiDResult.NOT_FOUND, "No match found"
else:
return self.PEiDResult.FOUND, results[0]
except pefile.PEFormatError:
return self.PEiDResult.NOT_FOUND, "Not a PE"
# ==================
# probe interfaces
# ==================
def run(self, paths):
results = PluginResult(name=type(self).plugin_display_name,
type=type(self).plugin_category,
version=None)
try:
started = timestamp(datetime.utcnow())
(status, response) = self.analyze(paths)
stopped = timestamp(datetime.utcnow())
results.duration = stopped - started
results.status = status
results.results = response
except Exception as e:
results.status = self.PEiDResult.ERROR
results.error = type(e).__name__ + " : " + str(e)
return results
| {
"content_hash": "ca5ba291771948e5f9b85ca27d26a892",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 76,
"avg_line_length": 33.53465346534654,
"alnum_prop": 0.5701210510776499,
"repo_name": "quarkslab/irma",
"id": "aac936d4944797a8091c2ba3ee363dd015b83157",
"size": "3912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "probe/modules/metadata/peid/plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "79"
},
{
"name": "CSS",
"bytes": "86535"
},
{
"name": "DIGITAL Command Language",
"bytes": "68"
},
{
"name": "Gherkin",
"bytes": "2366"
},
{
"name": "HTML",
"bytes": "26577"
},
{
"name": "JavaScript",
"bytes": "1774854"
},
{
"name": "Jinja",
"bytes": "2672"
},
{
"name": "Less",
"bytes": "13774"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PowerShell",
"bytes": "15660"
},
{
"name": "Python",
"bytes": "797592"
},
{
"name": "Shell",
"bytes": "61907"
}
],
"symlink_target": ""
} |
"""SCons.Variables.PathVariable
This file defines an option type for SCons implementing path settings.
To be used whenever a user-specified path override should be allowed.
Arguments to PathVariable are:
option-name = name of this option on the command line (e.g. "prefix")
option-help = help string for option
option-dflt = default value for this option
validator = [optional] validator for option value. Predefined
validators are:
PathAccept -- accepts any path setting; no validation
PathIsDir -- path must be an existing directory
PathIsDirCreate -- path must be a dir; will create
PathIsFile -- path must be a file
PathExists -- path must exist (any type) [default]
The validator is a function that is called and which
should return True or False to indicate if the path
is valid. The arguments to the validator function
are: (key, val, env). The key is the name of the
option, the val is the path specified for the option,
                 and the env is the env to which the Options have been
added.
Usage example:
Examples:
prefix=/usr/local
        opts = Variables()
opts.Add(PathVariable('qtdir',
'where the root of Qt is installed',
qtdir, PathIsDir))
opts.Add(PathVariable('qt_includes',
'where the Qt includes are installed',
'$qtdir/includes', PathIsDirCreate))
opts.Add(PathVariable('qt_libraries',
'where the Qt library is installed',
'$qtdir/lib'))
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/PathVariable.py 2014/07/05 09:42:21 garyo"
__all__ = ['PathVariable',]
import os
import os.path
import SCons.Errors
import SCons.Util
class _PathVariableClass(object):
def PathAccept(self, key, val, env):
"""Accepts any path, no checking done."""
pass
def PathIsDir(self, key, val, env):
"""Validator to check if Path is a directory."""
if not os.path.isdir(val):
if os.path.isfile(val):
m = 'Directory path for option %s is a file: %s'
else:
m = 'Directory path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
def PathIsDirCreate(self, key, val, env):
"""Validator to check if Path is a directory,
creating it if it does not exist."""
if os.path.isfile(val):
m = 'Path for option %s is a file, not a directory: %s'
raise SCons.Errors.UserError(m % (key, val))
if not os.path.isdir(val):
os.makedirs(val)
def PathIsFile(self, key, val, env):
"""validator to check if Path is a file"""
if not os.path.isfile(val):
if os.path.isdir(val):
m = 'File path for option %s is a directory: %s'
else:
m = 'File path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
def PathExists(self, key, val, env):
"""validator to check if Path exists"""
if not os.path.exists(val):
m = 'Path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
def __call__(self, key, help, default, validator=None):
        # NB: searchfunc is currently undocumented and unsupported
"""
The input parameters describe a 'path list' option, thus they
are returned with the correct converter and validator appended. The
result is usable for input to opts.Add() .
The 'default' option specifies the default path to use if the
user does not specify an override with this option.
validator is a validator, see this file for examples
"""
if validator is None:
validator = self.PathExists
if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key):
return (key, '%s ( /path/to/%s )' % (help, key[0]), default,
validator, None)
else:
return (key, '%s ( /path/to/%s )' % (help, key), default,
validator, None)
PathVariable = _PathVariableClass()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "b596a203ff6e919afaa3fed5179f31d2",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 86,
"avg_line_length": 38.17687074829932,
"alnum_prop": 0.6211689237348539,
"repo_name": "tempbottle/Nuitka",
"id": "3261d73064ea82617a7c71aea251ab866eb97043",
"size": "5612",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Variables/PathVariable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5518"
},
{
"name": "Batchfile",
"bytes": "1810"
},
{
"name": "C",
"bytes": "36149"
},
{
"name": "C++",
"bytes": "433315"
},
{
"name": "Python",
"bytes": "4356577"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
} |
"""Support for Speedtest.net internet speed testing sensor."""
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import callback
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTR_BYTES_RECEIVED,
ATTR_BYTES_SENT,
ATTR_SERVER_COUNTRY,
ATTR_SERVER_ID,
ATTR_SERVER_NAME,
ATTRIBUTION,
DEFAULT_NAME,
DOMAIN,
ICON,
SENSOR_TYPES,
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Speedtestdotnet sensors."""
speedtest_coordinator = hass.data[DOMAIN]
entities = []
for sensor_type in SENSOR_TYPES:
entities.append(SpeedtestSensor(speedtest_coordinator, sensor_type))
async_add_entities(entities)
class SpeedtestSensor(CoordinatorEntity, RestoreEntity):
"""Implementation of a speedtest.net sensor."""
def __init__(self, coordinator, sensor_type):
"""Initialize the sensor."""
super().__init__(coordinator)
self._name = SENSOR_TYPES[sensor_type][0]
self.type = sensor_type
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{DEFAULT_NAME} {self._name}"
@property
def unique_id(self):
"""Return sensor unique_id."""
return self.type
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Return icon."""
return ICON
@property
def device_state_attributes(self):
"""Return the state attributes."""
if not self.coordinator.data:
return None
attributes = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_SERVER_NAME: self.coordinator.data["server"]["name"],
ATTR_SERVER_COUNTRY: self.coordinator.data["server"]["country"],
ATTR_SERVER_ID: self.coordinator.data["server"]["id"],
}
if self.type == "download":
attributes[ATTR_BYTES_RECEIVED] = self.coordinator.data["bytes_received"]
if self.type == "upload":
attributes[ATTR_BYTES_SENT] = self.coordinator.data["bytes_sent"]
return attributes
async def async_added_to_hass(self):
"""Handle entity which will be added."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
self._state = state.state
@callback
def update():
"""Update state."""
self._update_state()
self.async_write_ha_state()
self.async_on_remove(self.coordinator.async_add_listener(update))
self._update_state()
def _update_state(self):
"""Update sensors state."""
if self.coordinator.data:
if self.type == "ping":
self._state = self.coordinator.data["ping"]
elif self.type == "download":
self._state = round(self.coordinator.data["download"] / 10 ** 6, 2)
elif self.type == "upload":
self._state = round(self.coordinator.data["upload"] / 10 ** 6, 2)
| {
"content_hash": "035344f61acdebeed131883c508898ff",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 85,
"avg_line_length": 30.9375,
"alnum_prop": 0.6112554112554113,
"repo_name": "turbokongen/home-assistant",
"id": "5607d2570c9797d95f3c4915f07c92bf199c14e9",
"size": "3465",
"binary": false,
"copies": "8",
"ref": "refs/heads/dev",
"path": "homeassistant/components/speedtestdotnet/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "30405146"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
"""The security service api."""
from oslo_log import log
import six
import webob
from webob import exc
from manila.api import common
from manila.api.openstack import wsgi
from manila.api.views import security_service as security_service_views
from manila.common import constants
from manila import db
from manila import exception
from manila.i18n import _
from manila.i18n import _LI
from manila import policy
RESOURCE_NAME = 'security_service'
LOG = log.getLogger(__name__)
class SecurityServiceController(wsgi.Controller):
"""The Shares API controller for the OpenStack API."""
_view_builder_class = security_service_views.ViewBuilder
def show(self, req, id):
"""Return data about the given security service."""
context = req.environ['manila.context']
try:
security_service = db.security_service_get(context, id)
policy.check_policy(context, RESOURCE_NAME, 'show',
security_service)
except exception.NotFound:
raise exc.HTTPNotFound()
return self._view_builder.detail(req, security_service)
def delete(self, req, id):
"""Delete a security service."""
context = req.environ['manila.context']
LOG.info(_LI("Delete security service with id: %s"),
id, context=context)
try:
security_service = db.security_service_get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
share_nets = db.share_network_get_all_by_security_service(
context, id)
if share_nets:
# Cannot delete security service
# if it is assigned to share networks
raise exc.HTTPForbidden()
policy.check_policy(context, RESOURCE_NAME,
'delete', security_service)
db.security_service_delete(context, id)
return webob.Response(status_int=202)
def index(self, req):
"""Returns a summary list of security services."""
policy.check_policy(req.environ['manila.context'], RESOURCE_NAME,
'index')
return self._get_security_services(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of security services."""
policy.check_policy(req.environ['manila.context'], RESOURCE_NAME,
'detail')
return self._get_security_services(req, is_detail=True)
def _get_security_services(self, req, is_detail):
"""Returns a transformed list of security services.
The list gets transformed through view builder.
"""
context = req.environ['manila.context']
search_opts = {}
search_opts.update(req.GET)
# NOTE(vponomaryov): remove 'status' from search opts
# since it was removed from security service model.
search_opts.pop('status', None)
if 'share_network_id' in search_opts:
share_nw = db.share_network_get(context,
search_opts['share_network_id'])
security_services = share_nw['security_services']
del search_opts['share_network_id']
else:
if 'all_tenants' in search_opts:
policy.check_policy(context, RESOURCE_NAME,
'get_all_security_services')
security_services = db.security_service_get_all(context)
else:
security_services = db.security_service_get_all_by_project(
context, context.project_id)
search_opts.pop('all_tenants', None)
common.remove_invalid_options(
context,
search_opts,
self._get_security_services_search_options())
if search_opts:
results = []
not_found = object()
for ss in security_services:
if all(ss.get(opt, not_found) == value for opt, value in
six.iteritems(search_opts)):
results.append(ss)
security_services = results
limited_list = common.limited(security_services, req)
if is_detail:
security_services = self._view_builder.detail_list(
req, limited_list)
for ss in security_services['security_services']:
share_networks = db.share_network_get_all_by_security_service(
context,
ss['id'])
ss['share_networks'] = [sn['id'] for sn in share_networks]
else:
security_services = self._view_builder.summary_list(
req, limited_list)
return security_services
def _get_security_services_search_options(self):
return ('name', 'id', 'type', 'user',
'server', 'dns_ip', 'domain', )
def _share_servers_dependent_on_sn_exist(self, context,
security_service_id):
share_networks = db.share_network_get_all_by_security_service(
context, security_service_id)
for sn in share_networks:
if sn['share_servers']:
return True
return False
def update(self, req, id, body):
"""Update a security service."""
context = req.environ['manila.context']
if not body or 'security_service' not in body:
raise exc.HTTPUnprocessableEntity()
security_service_data = body['security_service']
valid_update_keys = (
'description',
'name'
)
try:
security_service = db.security_service_get(context, id)
policy.check_policy(context, RESOURCE_NAME, 'update',
security_service)
except exception.NotFound:
raise exc.HTTPNotFound()
if self._share_servers_dependent_on_sn_exist(context, id):
for item in security_service_data:
if item not in valid_update_keys:
msg = _("Cannot update security service %s. It is "
"attached to share network with share server "
"associated. Only 'name' and 'description' "
"fields are available for update.") % id
raise exc.HTTPForbidden(explanation=msg)
policy.check_policy(context, RESOURCE_NAME, 'update', security_service)
security_service = db.security_service_update(
context, id, security_service_data)
return self._view_builder.detail(req, security_service)
def create(self, req, body):
"""Creates a new security service."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'create')
if not self.is_valid_body(body, 'security_service'):
raise exc.HTTPUnprocessableEntity()
security_service_args = body['security_service']
security_srv_type = security_service_args.get('type')
allowed_types = constants.SECURITY_SERVICES_ALLOWED_TYPES
if security_srv_type not in allowed_types:
raise exception.InvalidInput(
reason=(_("Invalid type %(type)s specified for security "
"service. Valid types are %(types)s") %
{'type': security_srv_type,
'types': ','.join(allowed_types)}))
security_service_args['project_id'] = context.project_id
security_service = db.security_service_create(
context, security_service_args)
return self._view_builder.detail(req, security_service)
def create_resource():
return wsgi.Resource(SecurityServiceController())
| {
"content_hash": "1dbc12778856f6bad26bdefcac0a4a06",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 79,
"avg_line_length": 38.40394088669951,
"alnum_prop": 0.5846587993842997,
"repo_name": "jcsp/manila",
"id": "c0ffb37afc645ed869d5c7c0c9fbadf2e0b72194",
"size": "8425",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "manila/api/v1/security_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "4993686"
},
{
"name": "Shell",
"bytes": "42913"
}
],
"symlink_target": ""
} |
subreddit = 'tf2+tf2memes+tf2shitposterclub'
t_channel = '@r_TF2'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| {
"content_hash": "f9c6bc09cfc617406ad2618bed082c19",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 44,
"avg_line_length": 23.166666666666668,
"alnum_prop": 0.7338129496402878,
"repo_name": "Fillll/reddit2telegram",
"id": "308b1852787975c468ee7bc0ccd039c43b93cdbe",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit2telegram/channels/~inactive/r_tf2/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "301463"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
} |
"""Support for Ebusd daemon for communication with eBUS heating systems."""
from datetime import timedelta
import logging
import socket
import ebusdpy
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PORT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.util import Throttle
from .const import DOMAIN, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "ebusd"
DEFAULT_PORT = 8888
CONF_CIRCUIT = "circuit"
CACHE_TTL = 900
SERVICE_EBUSD_WRITE = "ebusd_write"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=15)
def verify_ebusd_config(config):
"""Verify eBusd config."""
circuit = config[CONF_CIRCUIT]
for condition in config[CONF_MONITORED_CONDITIONS]:
if condition not in SENSOR_TYPES[circuit]:
raise vol.Invalid(f"Condition '{condition}' not in '{circuit}'.")
return config
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
vol.All(
{
vol.Required(CONF_CIRCUIT): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=[]): cv.ensure_list,
},
verify_ebusd_config,
)
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the eBusd component."""
conf = config[DOMAIN]
name = conf[CONF_NAME]
circuit = conf[CONF_CIRCUIT]
monitored_conditions = conf.get(CONF_MONITORED_CONDITIONS)
server_address = (conf.get(CONF_HOST), conf.get(CONF_PORT))
try:
_LOGGER.debug("Ebusd integration setup started")
ebusdpy.init(server_address)
hass.data[DOMAIN] = EbusdData(server_address, circuit)
sensor_config = {
CONF_MONITORED_CONDITIONS: monitored_conditions,
"client_name": name,
"sensor_types": SENSOR_TYPES[circuit],
}
load_platform(hass, "sensor", DOMAIN, sensor_config, config)
hass.services.register(DOMAIN, SERVICE_EBUSD_WRITE, hass.data[DOMAIN].write)
_LOGGER.debug("Ebusd integration setup completed")
return True
except (socket.timeout, OSError):
return False
class EbusdData:
"""Get the latest data from Ebusd."""
def __init__(self, address, circuit):
"""Initialize the data object."""
self._circuit = circuit
self._address = address
self.value = {}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self, name, stype):
"""Call the Ebusd API to update the data."""
try:
_LOGGER.debug("Opening socket to ebusd %s", name)
command_result = ebusdpy.read(
self._address, self._circuit, name, stype, CACHE_TTL
)
if command_result is not None:
if "ERR:" in command_result:
_LOGGER.warning(command_result)
else:
self.value[name] = command_result
except RuntimeError as err:
_LOGGER.error(err)
raise RuntimeError(err) from err
def write(self, call):
"""Call write methon on ebusd."""
name = call.data.get("name")
value = call.data.get("value")
try:
_LOGGER.debug("Opening socket to ebusd %s", name)
command_result = ebusdpy.write(self._address, self._circuit, name, value)
if command_result is not None:
if "done" not in command_result:
_LOGGER.warning("Write command failed: %s", name)
except RuntimeError as err:
_LOGGER.error(err)
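# A minimal illustrative sketch (an addition, not part of the original module):
# once setup() has registered the service above, a write can be triggered from
# elsewhere in Home Assistant roughly like this. The register name and value
# below are made-up examples:
#
# hass.services.call(
#     DOMAIN, SERVICE_EBUSD_WRITE, {"name": "Hc1HeatingCurve", "value": "1.2"}
# )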
| {
"content_hash": "a14cfe991c60d57b54a54156b765e825",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 88,
"avg_line_length": 30.89763779527559,
"alnum_prop": 0.6049949031600408,
"repo_name": "tchellomello/home-assistant",
"id": "855e62727b56838aeed96599cafd7c5e23c803ed",
"size": "3924",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ebusd/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem140.py
#
# Modified Fibonacci golden nuggets
# =================================
# Published on Saturday, 3rd February 2007, 07:00 am
#
# Consider the infinite polynomial series AG(x) = x*G_1 + x^2*G_2 + x^3*G_3 + ...,
# where G_k is the kth term of the second order recurrence relation
# G_k = G_(k-1) + G_(k-2), with G_1 = 1 and G_2 = 4; that is, 1, 4, 5, 9, 14, 23, ... .
# For this problem we shall be concerned with values of x for which AG(x) is a
# positive integer. The corresponding values of x for the first five natural
# numbers are shown below.
#
#     x                  AG(x)
#     (sqrt(5)-1)/4      1
#     2/5                2
#     (sqrt(22)-2)/6     3
#     (sqrt(137)-5)/14   4
#     1/2                5
#
# We shall call AG(x) a golden nugget if x is rational, because they become
# increasingly rarer; for example, the 20th golden nugget is 211345365.
# Find the sum of the first thirty golden nuggets.
import projecteuler as pe
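# A possible line of attack (an addition for illustration, not the author's
# solution): summing the series with the recurrence gives the closed form
# AG(x) = x*(1 + 3x) / (1 - x - x^2). Setting AG(x) = n and solving
# (n + 3)*x^2 + (n + 1)*x - n = 0 for x shows that x is rational exactly when
# the discriminant 5*n^2 + 14*n + 1 is a perfect square, so those n are the
# golden nuggets (2, 5, 21, 42, ...). The brute-force check below illustrates
# the criterion but is far too slow to reach the 30th nugget; math.isqrt
# requires Python 3.8+.
def naive_golden_nuggets(count):
    from math import isqrt
    nuggets = []
    n = 1
    while len(nuggets) < count:
        d = 5 * n * n + 14 * n + 1
        r = isqrt(d)
        if r * r == d:
            nuggets.append(n)
        n += 1
    return nuggets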
def main():
pass
if __name__ == "__main__":
main()
| {
"content_hash": "ffe16dda808cfd55f0ead3f481d144dd",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 39.125,
"alnum_prop": 0.6528221512247071,
"repo_name": "olduvaihand/ProjectEuler",
"id": "fd6b2b25dc06ed58a27283db0ff99ecc1a801907",
"size": "941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/problem140.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "0"
},
{
"name": "Python",
"bytes": "422751"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CopyFolder(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the CopyFolder Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(CopyFolder, self).__init__(temboo_session, '/Library/Box/Folders/CopyFolder')
def new_input_set(self):
return CopyFolderInputSet()
def _make_result_set(self, result, path):
return CopyFolderResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return CopyFolderChoreographyExecution(session, exec_id, path)
class CopyFolderInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the CopyFolder
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved during the OAuth2 process.)
"""
super(CopyFolderInputSet, self)._set_input('AccessToken', value)
def set_AsUser(self, value):
"""
Set the value of the AsUser input for this Choreo. ((optional, string) The ID of the user. Only used for enterprise administrators to make API calls for their managed users.)
"""
super(CopyFolderInputSet, self)._set_input('AsUser', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) A comma-separated list of fields to include in the response.)
"""
super(CopyFolderInputSet, self)._set_input('Fields', value)
def set_FolderID(self, value):
"""
Set the value of the FolderID input for this Choreo. ((required, string) The id of the folder to copy.)
"""
super(CopyFolderInputSet, self)._set_input('FolderID', value)
def set_Name(self, value):
"""
Set the value of the Name input for this Choreo. ((required, string) An optional new name for the folder.)
"""
super(CopyFolderInputSet, self)._set_input('Name', value)
def set_ParentID(self, value):
"""
Set the value of the ParentID input for this Choreo. ((required, string) The ID of the destination folder to copy the folder to.)
"""
super(CopyFolderInputSet, self)._set_input('ParentID', value)
class CopyFolderResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the CopyFolder Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Box.)
"""
return self._output.get('Response', None)
class CopyFolderChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return CopyFolderResultSet(response, path)
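# A minimal usage sketch (an addition, not part of the generated module). The
# TembooSession import path, the execute_with_results() call and every input
# value below are assumptions following the usual Temboo SDK pattern:
#
# from temboo.core.session import TembooSession
#
# session = TembooSession('ACCOUNT_NAME', 'APP_NAME', 'APP_KEY')
# choreo = CopyFolder(session)
# inputs = choreo.new_input_set()
# inputs.set_AccessToken('BOX_ACCESS_TOKEN')
# inputs.set_FolderID('12345')
# inputs.set_Name('Copied folder')
# inputs.set_ParentID('0')
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())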
| {
"content_hash": "6e27fbdbaadceddae6139f72ace0feb1",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 182,
"avg_line_length": 41.28395061728395,
"alnum_prop": 0.6791267942583732,
"repo_name": "jordanemedlock/psychtruths",
"id": "f92402fcfe9019821bfbbdfcf6969e8c5f81da6e",
"size": "4205",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/Box/Folders/CopyFolder.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from sqlalchemy import desc, asc
from webob.multidict import MultiDict
class SortQueryGenerator(OrderedDict):
@classmethod
def from_getlist(cls, field_list):
# assert isinstance(field_list, unicode)
return cls(cls._convert_from_getlist(field_list))
@classmethod
def _convert_from_getlist(cls, raw_field_names):
if not raw_field_names:
return
for field in raw_field_names.split(','):
name, order = field.split(':')
assert order in ('asc', 'desc')
yield (name, order)
def __setitem__(self, name, order):
superobj = super(SortQueryGenerator, self)
assert order in ('desc', 'asc', None)
if order in ('asc', 'desc'):
superobj.__setitem__(name, order)
elif order is None:
superobj.__delitem__(name)
_mutate = __setitem__
def get_params(self):
res = []
for field, order in self.items():
res.append('{name}:{order}'. format(name = field, order = order))
return ",". join(res)
def get_query(self, query, mapping):
for field, order in self.items():
field = mapping.get(field)
if field is not None:
if order == 'asc':
query = query.order_by(asc(field))
else:
query = query.order_by(desc(field))
return query
def can_sort(self, name, order):
my_order = self.get(name)
if my_order is None and order == None:
return False
elif order == my_order:
return False
else:
return True
class FilterQueryGenerator(list):
@classmethod
def from_params(cls, params):
obj = cls()
filters_names = params.getall('filter')
for f in filters_names:
op = params.get('op[{0}]'.format(f))
v = params.get('v[{0}]'. format(f))
obj.append((f, op, v))
return obj
def get_params(self):
res = []
for name, op, v in self:
res.append(('filter', name))
res.append(('op[{0}]'.format(name), op))
res.append(('v[{0}]'.format(name), v))
return res # TODO: to MultiDict
def get_query(self, query, mapping):
for field_name, op, v in self:
if op != 'equal':
continue
field = mapping.get(field_name)
if field is not None:
query = query.filter(field == v)
return query
class Query(object):
# TODO: configurability. If someone dislike pagination
def __init__(self, params):
self.sorter = SortQueryGenerator.from_getlist(params.get('sort'))
self.filter = FilterQueryGenerator.from_params(params)
self.page = params.get('page', 0)
def copy(self):
return self.__class__(self.get_params())
def get_alchemy_query(self, query, mapping):
query = self.sorter.get_query(query, mapping)
query = self.filter.get_query(query, mapping)
return query
def get_params(self):
params = []
# TODO: it wants to be prettier
filters = self.filter.get_params()
if filters:
params += filters
sort = self.sorter.get_params()
if sort:
params.append(('sort', sort))
page = self.page
if page:
params.append(('page', self.page))
return MultiDict(params)
# helpers for template. Must be in mixin
# anyway we should use request there. cos this is only params
def get_sort_url(self, request, name, order):
params = self.copy()
params.sorter._mutate(name, order)
return request.current_route_url(_query=params.get_params())
def get_filter_url(self, request, field_name, op, v):
params = self.copy()
params.filter = params.filter.__class__([(field_name, op, v)])
return request.current_route_url(_query=params.get_params())
def clear_sort(self, request):
params = self.copy()
params.sorter = params.sorter.__class__()
return request.current_route_url(_query=params.get_params())
def clear_filter(self, request):
params = self.copy()
params.filter = params.filter.__class__()
return request.current_route_url(_query=params.get_params())
def get_page(self, request, page_num):
params = self.copy()
params.page = page_num
return request.current_route_url(_query=params.get_params())
def get_fields_from_model(model):
fields = dict((column.name, column) for column in model.__table__.columns)
return fields
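# A minimal usage sketch (an addition, not part of the original module),
# assuming a hypothetical declarative model MyModel and a SQLAlchemy session
# db_session:
#
# params = MultiDict([
#     ('sort', 'name:asc'),
#     ('filter', 'status'), ('op[status]', 'equal'), ('v[status]', 'active'),
#     ('page', 2),
# ])
# q = Query(params)
# mapping = get_fields_from_model(MyModel)
# rows = q.get_alchemy_query(db_session.query(MyModel), mapping).all()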
| {
"content_hash": "a33fa569da249dc824915314cf39bcb0",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 79,
"avg_line_length": 30.61006289308176,
"alnum_prop": 0.5559893158002877,
"repo_name": "fillest/sapyens",
"id": "1f13e7ead2b07c1bd2cb682a145db8b3744c998a",
"size": "4891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sapyens/crud/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Mako",
"bytes": "11297"
},
{
"name": "Python",
"bytes": "61021"
},
{
"name": "Shell",
"bytes": "801"
}
],
"symlink_target": ""
} |
import os
import sys
import unittest
## package
from leylab_pipelines.DB import Convert
# data dir
test_dir = os.path.join(os.path.dirname(__file__))
data_dir = os.path.join(test_dir, 'data')
# global variables
ENSEMBL_ID = 'ENST00000407559'
HGNC_ID = '9245'
ENTREZ_ID = '39'
UNIPROT_ID = 'Q9BWD1'
ACCESSION_ID = 'AC131209'
EMAIL = 'dummy@dummy.info'
FAKE_EMAIL = 'pyentrez.info'
# tests
class Test_Convert(unittest.TestCase):
def setUp(self):
self.Id = Convert.Conversion(EMAIL)
def tearDown(self):
pass
def test_email(self):
self.assertRaises(ValueError, Convert.Conversion, FAKE_EMAIL)
def test_convert_ensembl_to_entrez(self):
# real ID
ID = self.Id.convert_ensembl_to_entrez(ENSEMBL_ID)
self.assertListEqual(ID, [ENSEMBL_ID, '55112'])
# fake ID
ID = self.Id.convert_ensembl_to_entrez(ENSEMBL_ID + '_FAKEID')
self.assertListEqual(ID, [ENSEMBL_ID + '_FAKEID', None])
def test_convert_hgnc_to_entrez(self):
# real ID
ID = self.Id.convert_hgnc_to_entrez(HGNC_ID)
self.assertListEqual(ID, [HGNC_ID, '8500'])
# fake ID
ID = self.Id.convert_hgnc_to_entrez(HGNC_ID + '_FAKEID')
self.assertListEqual(ID, [HGNC_ID + '_FAKEID', None])
def test_convert_entrez_to_uniprot(self):
# real ID
ID = self.Id.convert_entrez_to_uniprot(ENTREZ_ID)
self.assertListEqual(ID, [ENTREZ_ID, 'Q9BWD1'])
# fake ID
def test_convert_uniprot_to_entrez(self):
# real ID
ID = self.Id.convert_uniprot_to_entrez(UNIPROT_ID)
self.assertListEqual(ID, [UNIPROT_ID, '39'])
# fake ID
def test_convert_accesion_to_taxid(self):
# real ID
ID = self.Id.convert_accession_to_taxid(ACCESSION_ID)
self.assertListEqual(ID, [ACCESSION_ID, '9606'])
# fake ID
ID = self.Id.convert_accession_to_taxid(ACCESSION_ID + '_FAKEID')
self.assertListEqual(ID, [ACCESSION_ID + '_FAKEID', None])
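# A minimal runner (an addition, not in the original file) so the tests can be
# executed directly with `python test_Convert.py`:
if __name__ == '__main__':
    unittest.main()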
| {
"content_hash": "4cc5c12db1ecd2f99538b1d3d8ee6dcb",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 73,
"avg_line_length": 28.416666666666668,
"alnum_prop": 0.6212121212121212,
"repo_name": "leylabmpi/leylab_pipelines",
"id": "56bba0012d6b2373b542ac75e53b4a47f04bb32c",
"size": "2115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_Convert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2331"
},
{
"name": "Python",
"bytes": "132653"
},
{
"name": "R",
"bytes": "9123"
}
],
"symlink_target": ""
} |
"""
Project Bluebox
2015, University of Stuttgart, IPVS/AS
"""
from _collections_abc import Iterator
from itertools import count
import abc
"""
Project Bluebox
Copyright (C) <2015> <University of Stuttgart>
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
from flask import Flask, render_template, request, Response
from werkzeug import secure_filename
from SwiftConnect import SwiftConnect
import json, logging, os, time, datetime
import appConfig
n = 0
def set_globvar_to_one():
global n # Needed to modify global copy of n(index)
n = n+ 6
# initialize logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(module)s - %(levelname)s ##\t %(message)s')
log = logging.getLogger()
# Initialize the Flask application
app = Flask(__name__)
# Instantiating SwiftClient
swift = SwiftConnect(appConfig.swift_type, appConfig.swift_url, appConfig.swift_user, appConfig.swift_pw)
##########################################################################################
"""
This route will show a form to perform an AJAX request.
jQuery is loaded to execute the request and update the
value of the operation.
"""
@app.route('/')
def index():
return render_template('index.html')
@app.route('/nextpage')
def index2():
return render_template('index2.html')
##########################################################################################
"""
get the list of containers; we get the value of the marker from the front end (JavaScript)
"""
@app.route('/swift/containers', methods=['GET'])
def getContainers():
print("inside container list")
log.debug("inside container list")
m =request.args.get('marker','')
print(m)
ctss= swift.containerList(marker=m)
j = json.dumps(ctss,sort_keys=True)
return Response(j, mimetype='application/json')
##########################################################################################
##########################################################################################
# """
# get the list of containers
# """
# @app.route('/swift/Display', methods=['GET'])
# def Display(ctss):
#
# j = json.dumps(ctss,sort_keys=True)
# return Response(j, mimetype='application/json')
##########################################################################################
##########################################################################################
"""
# get the list of next containers
# """
# @app.route('/swift/containers/next', methods=['GET'])
# def getNextContainers():
#
#
# print("hello")
# print("hello")
# cts= swift.containerList()
# # mark=""
# list=6
# set_globvar_to_one()
# print(n)
# ctss= swift.containerListLimit(list,n)
#
# j = json.dumps(ctss,sort_keys=True)
# return Response(j, mimetype='application/json')
##########################################################################################
"""
create the Container
"""
##########################################################################################
@app.route('/create', methods=['POST'])
def create():
folderName = request.form['containerName']
print(folderName)
swift.createContainer(folderName)
return Response(None)
##########################################################################################
"""
get the list of all objects in a container
"""
@app.route('/swift/containers/<containerName>/objects', methods=['GET'])
def getObjectsInContainer(containerName):
n =request.args.get('marker','')
print('hallo ' +n);
log.debug(n)
log.debug(containerName)
cts = swift.fileList(containerName,marker=n)
f = json.dumps(cts,sort_keys=True)
return Response(f, mimetype='application/json')
"""
parse objects size
"""
def parseObjects(container):
x = swift.ObjectList(container);
log.debug(x)
print("inside container list22")
##########################################################################################
@app.route('/swift/containers/<containerName>/objects/<path:filename>/details', methods=['GET'])
def getMetaDataInfo(containerName,filename):
log.debug("Get metadata information")
log.debug(containerName)
log.debug(filename)
metaInfo = swift.getObjMetaData(containerName,filename)
metadata = json.dumps(metaInfo,sort_keys=True)
return Response(metadata, mimetype='application/json')
##########################################################################################
"""
Route that will process the file upload
"""
@app.route('/upload', methods=['POST'])
def upload():
# Get the name of the uploaded file
log.debug("inside the upload part")
inputFile = request.files['objectName']
# Check if the file is one of the allowed types/extensions
if inputFile:
log.debug("accepted file upload")
# Make the filename safe, remove unsupported chars
inputFileName = secure_filename(inputFile.filename)
log.debug(inputFileName)
inputFileContent = inputFile.read()
print("hjdgkjdgffhgkdsjh",inputFileContent)
log.debug(inputFileContent)
folderName = request.form['containerNameUp']
log.debug(folderName)
retentime = request.form['RetentionPeriod']
log.debug(retentime)
if retentime:
convertretentime = datetime.datetime.strptime(retentime,"%Y-%m-%d").strftime("%d-%m-%Y")
log.debug(convertretentime)
retentimestamp = int(time.mktime(datetime.datetime.strptime(convertretentime, "%d-%m-%Y").timetuple()))
log.debug(retentimestamp)
else:
retentimestamp = retentime
h = dict()
h["X-Object-Meta-RetentionTime"] = retentimestamp
h["X-Object-Meta-OwnerName"] = request.form['OwnerName']
swift.createObject(inputFileName,inputFileContent,folderName,h,chunk_size=10)
encodedoutputFileContent = swift.retrieveObject(folderName,inputFileName)
return Response(None)
##########################################################################################
"""
download obj route
"""
@app.route('/swift/containers/<containerName>/objects/<path:filename>', methods=['GET'])
def downloadObject(containerName, filename):
log.debug("downloadObject: %s - %s" % (containerName, filename))
encodedOutputFile = swift.getObject(containerName,filename,resp_chunk_size=10)
return Response(encodedOutputFile, mimetype='application/octet-stream')
##########################################################################################
def calcTimeDifference(timestamp):
try:
return int(timestamp) - int(time.time())
except ValueError:
return False
def isRetentionPeriodExpired(timestamp):
if (calcTimeDifference(timestamp)):
return calcTimeDifference(timestamp) <= 0
return False
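# A small illustrative note on the two helpers above (an addition, not part of
# the original code): timestamps are Unix epoch seconds stored as strings, so
# for example:
#
# past = str(int(time.time()) - 3600)
# isRetentionPeriodExpired(past)    # -> True, the retention period has passed
# future = str(int(time.time()) + 3600)
# isRetentionPeriodExpired(future)  # -> False, deletion is still blocked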
"""
delete obj route
"""
@app.route('/swift/containers/<containerName>/objects/<path:filename>', methods=['DELETE'])
def deleteObject(containerName,filename):
log.debug("deleteObject: %s - %s" % (containerName, filename))
json1 = json.dumps(swift.getObjMetaData(containerName,filename),ensure_ascii=False)
log.debug(json1)
new_dict = json.loads(json1)
retentimestamp = new_dict['x-object-meta-retentiontime']
if (isRetentionPeriodExpired(retentimestamp) or not retentimestamp):
swift.delObject(containerName,filename)
responsemsg={}
responsemsg['deletestatus'] = "done"
return Response(json.dumps(responsemsg),mimetype='application/json')
else:
log.debug("You are not allowed to delete the file!")
log.debug( "The retentiondate is: " +
datetime.datetime.fromtimestamp(
int(retentimestamp)
).strftime('%m-%d-%Y')
)
minutes, seconds = divmod(calcTimeDifference(retentimestamp), 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
log.debug("The number of days left for deletion: " + str(days))
log.debug("You should wait for "+ str(weeks)+" weeks and "+ str(days)+" days and "+str(hours)+" hours and "+str(minutes)+" minutes and"+str(seconds)+" seconds to delete this file!!!")
responsemsg={}
responsemsg['deletestatus'] = "failed"
responsemsg['retention'] = datetime.datetime.fromtimestamp(int(retentimestamp)).strftime('%m-%d-%Y')
responsemsg['seconds'] = seconds
responsemsg['minutes'] = minutes
responsemsg['hours'] = hours
responsemsg['days'] = days
responsemsg['weeks'] = weeks
return Response(json.dumps(responsemsg),mimetype='application/json')
#################################Scheduler#########################################################
@app.route('/swift/containers/<containerName>/CheckOldFiles/', methods=['GET'])
def CheckOldFiles(containerName, doDelete=False):
log.debug(containerName)
files = swift.fileList(containerName)
oldFiles={}
filenames = list()
for file in files:
log.debug('{0}\t{1}\t{2}'.format(file['name'], file['bytes'], file['last_modified']))
fileMetaDict = swift.getObjMetaData(containerName,file['name'])
log.debug(fileMetaDict)
log.debug(file['name'])
log.debug(fileMetaDict['x-object-meta-retentiontime'])
retentimestamp = fileMetaDict['x-object-meta-retentiontime']
if (isRetentionPeriodExpired(retentimestamp)):
filenames.append(file['name'])
log.debug(filenames)
responseObj = {"list" : filenames}
if (doDelete):
swift.delObjects(containerName,filenames)
return Response(json.dumps(responseObj),mimetype='application/json')
# TODO what should we do about the files which have no retention date
###################################################################################################
@app.route('/swift/containers/<containerName>/DeleteOldFiles/', methods=['Delete'])
def DeleteOldFiles(containerName):
return CheckOldFiles(containerName, doDelete=True)
###################################################################################################
#Main Function
if __name__ == '__main__':
appPort = os.getenv('VCAP_APP_PORT', '5000')
appHost = os.getenv('VCAP_APP_HOST', '127.0.0.1')
app.run(
host=appHost,
port=int(appPort),
debug=True
)
| {
"content_hash": "bf74bbb8efcec4605c4391058d71fa08",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 186,
"avg_line_length": 34.193333333333335,
"alnum_prop": 0.5947553129264964,
"repo_name": "Al-Hashimi/BlueMix",
"id": "fa957e360d46f5f28fd74f4a92da9c95f2a16b21",
"size": "10258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18698"
},
{
"name": "HTML",
"bytes": "7731"
},
{
"name": "JavaScript",
"bytes": "11264"
},
{
"name": "Python",
"bytes": "20958"
}
],
"symlink_target": ""
} |
"""
A directed graph is strongly connected if there is a path between all pairs of vertices. A strongly connected component (SCC) of a directed graph is a maximal strongly connected subgraph.
The algorithm below calls DFS, finds the reverse of the graph, and calls DFS again. DFS takes O(V+E) for a graph represented using an adjacency list. Reversing the graph also takes O(V+E) time, since we simply traverse all adjacency lists.
Time Complexity: O(V + E)
"""
from collections import defaultdict
class Graph:
def __init__(self, vertices):
self.V = vertices
self.graph = defaultdict(list)
def add_edge(self, u, v):
self.graph[u].append(v)
def fill_order(self, vertex, visited, stack):
visited[vertex] = True
for neighbour in self.graph[vertex]:
if visited[neighbour] == False:
self.fill_order(neighbour, visited, stack)
stack.append(vertex)
    def get_transpose(self):
g = Graph(self.V)
for u in self.graph:
for v in self.graph[u]:
g.add_edge(v, u)
return g
def dfs_util(self, vertex, visited, curr_res):
visited[vertex] = True
curr_res.append(vertex)
for u in self.graph[vertex]:
if visited[u] == False:
self.dfs_util(u, visited, curr_res)
def get_strongly_connected_component(self):
stack = []
result = []
visited = [False for i in xrange(self.V)]
for u in xrange(self.V):
if visited[u] == False:
self.fill_order(u, visited, stack)
        transposed_graph = self.get_transpose()
visited = [False for i in xrange(self.V)]
while stack:
vertex = stack.pop()
if visited[vertex] == False:
curr_res = []
transposed_graph.dfs_util(vertex, visited, curr_res)
result.append(curr_res)
return result
g = Graph(5)
g.add_edge(1, 0)
g.add_edge(0, 2)
g.add_edge(2, 1)
g.add_edge(0, 3)
g.add_edge(3, 4)
print g.get_strongly_connected_component()
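# For the edges above the strongly connected components are {0, 1, 2}, {3} and
# {4}; with this traversal order the call prints [[0, 1, 2], [3], [4]].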
| {
"content_hash": "64789ca48b2bbf61f6c7d59adf6f9f3e",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 264,
"avg_line_length": 26.444444444444443,
"alnum_prop": 0.5985060690943044,
"repo_name": "codervikash/algorithms",
"id": "ccb5d3f60baf40c3eb5174a6cb113d1035b67f1d",
"size": "2142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Graphs/kosaraju_strongly_connected_components.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3500"
},
{
"name": "JavaScript",
"bytes": "6523"
},
{
"name": "Python",
"bytes": "55356"
}
],
"symlink_target": ""
} |
import pickle
import os
import settings
class Session(object):
def __init__(self):
self._data = {}
self._load()
def __getitem__(self, key):
return self._data.get(key)
def __setitem__(self, key, value):
self._data[key] = value
self._save()
def _load(self):
if not (os.path.exists(settings.SESSION_NAME) and os.path.isfile(settings.SESSION_NAME)):
self._save()
with open(settings.SESSION_NAME, 'rb+') as f:
self._data = pickle.load(f)
def _save(self):
with open(settings.SESSION_NAME, 'wb+') as f:
pickle.dump(self._data, f)
session = Session()
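# A minimal usage sketch (an addition, not part of the original module),
# assuming settings.SESSION_NAME points to a writable pickle file and that this
# module is importable as `session`:
#
# from session import session
# session['token'] = 'abc123'
# print(session['token'])  # -> 'abc123', and still available after a restart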
| {
"content_hash": "96f5a9da4b3f5fbff58122fca59ee4c3",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 97,
"avg_line_length": 23.137931034482758,
"alnum_prop": 0.563338301043219,
"repo_name": "patrickporto/python-box",
"id": "99917cef67658bddc8f64d18022668c5344b43e7",
"size": "671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24607"
}
],
"symlink_target": ""
} |
import copy
from auto_gen import DBVistrail as _DBVistrail
from auto_gen import DBAdd, DBChange, DBDelete, DBAbstraction, DBGroup, \
DBModule
from id_scope import IdScope
class DBVistrail(_DBVistrail):
def __init__(self, *args, **kwargs):
_DBVistrail.__init__(self, *args, **kwargs)
self.idScope = IdScope(remap={DBAdd.vtType: 'operation',
DBChange.vtType: 'operation',
DBDelete.vtType: 'operation',
DBAbstraction.vtType: DBModule.vtType,
DBGroup.vtType: DBModule.vtType})
self.idScope.setBeginId('action', 1)
self.db_objects = {}
# keep a reference to the current logging information here
self.db_log_filename = None
self.log = None
def __copy__(self):
return DBVistrail.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = _DBVistrail.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = DBVistrail
cp.idScope = copy.copy(self.idScope)
cp.db_objects = copy.copy(self.db_objects)
cp.db_log_filename = self.db_log_filename
if self.log is not None:
cp.log = copy.copy(self.log)
else:
cp.log = None
return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBVistrail()
new_obj = _DBVistrail.update_version(old_obj, trans_dict, new_obj)
new_obj.update_id_scope()
if hasattr(old_obj, 'db_log_filename'):
new_obj.db_log_filename = old_obj.db_log_filename
if hasattr(old_obj, 'log'):
new_obj.log = old_obj.log
return new_obj
def update_id_scope(self):
def getOldObjId(operation):
if operation.vtType == 'change':
return operation.db_oldObjId
return operation.db_objectId
def getNewObjId(operation):
if operation.vtType == 'change':
return operation.db_newObjId
return operation.db_objectId
for action in self.db_actions:
self.idScope.updateBeginId('action', action.db_id+1)
if action.db_session is not None:
self.idScope.updateBeginId('session', action.db_session + 1)
for operation in action.db_operations:
self.idScope.updateBeginId('operation', operation.db_id+1)
if operation.vtType == 'add' or operation.vtType == 'change':
# update ids of data
self.idScope.updateBeginId(operation.db_what,
getNewObjId(operation)+1)
if operation.db_data is None:
if operation.vtType == 'change':
operation.db_objectId = operation.db_oldObjId
self.db_add_object(operation.db_data)
for annotation in action.db_annotations:
self.idScope.updateBeginId('annotation', annotation.db_id+1)
def db_add_object(self, obj):
self.db_objects[(obj.vtType, obj.db_id)] = obj
def db_get_object(self, type, id):
return self.db_objects.get((type, id), None)
def db_update_object(self, obj, **kwargs):
# want to swap out old object with a new version
# need this for updating aliases...
# hack it using setattr...
real_obj = self.db_objects[(obj.vtType, obj.db_id)]
for (k, v) in kwargs.iteritems():
if hasattr(real_obj, k):
setattr(real_obj, k, v)
| {
"content_hash": "521bdf941922af09a2e91880804ede75",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 77,
"avg_line_length": 40.10752688172043,
"alnum_prop": 0.5621983914209115,
"repo_name": "CMUSV-VisTrails/WorkflowRecommendation",
"id": "ba942d14483f36a9bea279d12019700ea6cfc6f2",
"size": "5572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/db/versions/v0_9_5/domain/vistrail.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "57"
},
{
"name": "PHP",
"bytes": "48730"
},
{
"name": "Python",
"bytes": "12760768"
},
{
"name": "Shell",
"bytes": "33785"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
'''
Created on Apr 7, 2014
@author: paepcke
NOTE: Requires existence of database 'unittest' and user 'unittest' without pwd
and ALL privileges. (Could get away with fewer privileges, but who cares.
'''
from collections import OrderedDict
import datetime
import unittest
from scripts.addAnonToActivityGradeTable import AnonAndModIDAdder
from pymysql_utils.pymysql_utils import MySQLDB
class TestAddAnonToActivityGrade(unittest.TestCase):
studentmoduleExcerptSchema = OrderedDict({
'activity_grade_id' : 'INT',
'student_id' : 'INT',
'course_display_name' : 'VARCHAR(255)',
'grade' : 'VARCHAR(5)',
'max_grade' : 'DOUBLE',
'percent_grade' : 'DOUBLE',
'parts_correctness' : 'VARCHAR(255)',
'answers' : 'VARCHAR(255)',
'num_attempts' : 'INT',
'first_submit' : 'DATETIME',
'last_submit' : 'DATETIME',
'module_type' : 'VARCHAR(255)',
'anon_screen_name' : 'VARCHAR(40)',
'resource_display_name' : 'VARCHAR(255)',
'module_id' : 'VARCHAR(255)'
})
studentmoduleExcerptColNames = [
'activity_grade_id',
'student_id',
'course_display_name',
'grade',
'max_grade',
'percent_grade',
'parts_correctness',
'answers',
'num_attempts',
'first_submit',
'last_submit',
'module_type',
'anon_screen_name',
'resource_display_name',
'module_id'
]
userGradeExcerptSchema = OrderedDict({
'name' : 'varchar(255)',
'screen_name' : 'varchar(255)',
'grade' : 'int',
'course_id' : 'varchar(255)',
'distinction' : 'tinyint',
'status' : 'varchar(50)',
'user_int_id' : 'int',
'anon_screen_name' : 'varchar(40)'
})
userGradeExcerptColNames = [
'name',
'screen_name',
'grade',
'course_id',
'distinction',
'status',
'user_int_id',
'anon_screen_name'
]
state1 = ' {"correct_map": {"i4x-Medicine-HRP258-problem-0c6cf38317be42e0829d10cc68e7451b_2_1": {"hint": "", "hintmode": null, "correctness": "correct", "npoints": null, "msg": "", "queuestate": null}}, "input_state": {"i4x-Medicine-HRP258-problem-0c6cf38317be42e0829d10cc68e7451b_2_1": {}}, "attempts": 1, "seed": 1, "done": true, "student_answers": {"i4x-Medicine-HRP258-problem-0c6cf38317be42e0829d10cc68e7451b_2_1": "choice_1"}} '
state2 = '{"correct_map": {}, "seed": 1, "student_answers": {}, "input_state": {"i4x-Medicine-HRP258-problem-0c6cf38317be42e0829d10cc68e7451b_2_1": {}}}'
state3 = '{"position": 1}'
modid1 = 'i4x://Carnegie/2013/chapter/1fee4bc0d5384cb4aa7a0d65f3ac5d9b'
modid2 = 'i4x://Carnegie/2013/chapter/5d08d2bae3ac4047bf5abe1d8dd16ac3'
modid3 = 'i4x://Carnegie/2013/chapter/9a9455cd30bd4c14819542bcd11bfcf8'
studentmoduleExcerptValues = \
[
[0,1,'myCourse',3,10,-1.0,state1,'',-1,'2014-01-10 04:10:45','2014-02-10 10:14:40','modtype1','abc','Guided Walkthrough',modid1],
[1,2,'myCourse',5,10,-1.0,state2,'',-1,'2014-01-10 11:30:23','2014-02-10 14:30:12','modtype2','def','Evaluation',modid2],
[2,3,'yourCourse',8,10,-1.0,state3,'',-1,'2014-01-10 18:34:12','2014-02-10 19:10:33','modtype2','ghi','Introduction',modid3]
]
userGradeExcerptValues = \
[
['John Doe','myScreenName',0,'engineering/myCourse/summer2014',0,'notpassing',1,'abc'],
['Jane Silver','herScreenName',100,'engineering/myCourse/summer2014',1,'passing',2,'def']
]
def setUp(self):
self.allColNames = TestAddAnonToActivityGrade.studentmoduleExcerptColNames[0]
for colName in TestAddAnonToActivityGrade.studentmoduleExcerptColNames[1:]:
self.allColNames += ',' + colName
self.db = MySQLDB(user='unittest', passwd='', db='unittest')
self.db.dropTable('StudentmoduleExcerpt')
self.db.createTable('StudentmoduleExcerpt',
TestAddAnonToActivityGrade.studentmoduleExcerptSchema,
temporary=False)
#***temporary=True)
self.db.bulkInsert('StudentmoduleExcerpt',
TestAddAnonToActivityGrade.studentmoduleExcerptColNames,
TestAddAnonToActivityGrade.studentmoduleExcerptValues)
self.db.createTable('ActivityGrade', TestAddAnonToActivityGrade.studentmoduleExcerptSchema)
# Make sure there isn't left over content (if the table existed):
self.db.truncateTable('ActivityGrade')
# Rudimentary UserGrade table:
self.db.dropTable('UserGrade')
self.db.createTable('UserGrade',
TestAddAnonToActivityGrade.userGradeExcerptSchema,
temporary=False)
self.db.bulkInsert('UserGrade',
TestAddAnonToActivityGrade.userGradeExcerptColNames,
TestAddAnonToActivityGrade.userGradeExcerptValues)
self.db.close()
def tearDown(self):
self.db = MySQLDB(user='unittest', passwd='', db='unittest')
# Can't drop tables: hangs
#self.db.dropTable('StudentmoduleExcerpt')
#self.db.dropTable('ActivityGrade')
self.db.close()
pass
def testAddAnonToActivityTable(self):
try:
# Modify the fake courseware_studentmodule excerpt
            # to add anon_screen_name, compute plusses/minusses,
# compute grade percentage, etc:
AnonAndModIDAdder('unittest', '', db='unittest', testing=True)
self.db = MySQLDB(user='unittest', passwd='', db='unittest')
for rowNum, row in enumerate(self.db.query('SELECT %s FROM ActivityGrade;' % self.allColNames)):
#print(row)
if rowNum == 0:
self.assertEqual((0, 1, 'myCourse', '3', 10.0, 30.0, '', '', -1, datetime.datetime(2014, 1, 10, 4, 10, 45), datetime.datetime(2014, 2, 10, 10, 14, 40), 'modtype1', 'abc', 'Guided Walkthrough', 'i4x://Carnegie/2013/chapter/1fee4bc0d5384cb4aa7a0d65f3ac5d9b'),
row)
elif rowNum == 1:
self.assertEqual((1, 2, 'myCourse', '5', 10.0, 50.0, '', '', -1, datetime.datetime(2014, 1, 10, 11, 30, 23), datetime.datetime(2014, 2, 10, 14, 30, 12), 'modtype2', 'def', 'Evaluation', 'i4x://Carnegie/2013/chapter/5d08d2bae3ac4047bf5abe1d8dd16ac3'),
row)
elif rowNum == 2:
self.assertEqual((2, 3, 'yourCourse', '8', 10.0, 80.0, '', '', -1, datetime.datetime(2014, 1, 10, 18, 34, 12), datetime.datetime(2014, 2, 10, 19, 10, 33), 'modtype2', 'None', 'Introduction', 'i4x://Carnegie/2013/chapter/9a9455cd30bd4c14819542bcd11bfcf8'),
row)
finally:
self.db.close()
def testCacheIdInt2Anon(self):
try:
infoAdder = AnonAndModIDAdder('unittest', '', db='unittest', testing=True)
self.assertEqual({1:'abc', 2:'def', 3: None}, infoAdder.int2AnonCache)
finally:
self.db.close()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testAddAnonToActivityTable']
unittest.main()
| {
"content_hash": "0ec4f028c224b49749ac1aee0535b3b5",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 438,
"avg_line_length": 49.61077844311377,
"alnum_prop": 0.5254073627036814,
"repo_name": "paepcke/json_to_relation",
"id": "3ddb95f296036142150b0ff2985dcf67b534b77b",
"size": "9789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "json_to_relation/test/testAddAnonToActivityGradeTable.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "5012"
},
{
"name": "HTML",
"bytes": "1402"
},
{
"name": "JavaScript",
"bytes": "26860"
},
{
"name": "PLpgSQL",
"bytes": "66011"
},
{
"name": "Python",
"bytes": "906312"
},
{
"name": "Shell",
"bytes": "293055"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(
name='Rancher V2 API Integration Tests',
version='0.1',
packages=[
'core',
],
license='ASL 2.0',
)
| {
"content_hash": "21a1f3f3573f9a8a6cbe87bc01128857",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 16.6,
"alnum_prop": 0.5903614457831325,
"repo_name": "rancher/v2-api",
"id": "2fa5f03bf0c36ff1a58ed2d7fff041f6a37a8cd4",
"size": "166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integration/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "8499"
},
{
"name": "Makefile",
"bytes": "411"
},
{
"name": "Python",
"bytes": "447"
},
{
"name": "Shell",
"bytes": "1707"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import json
from django.conf import settings
from django.contrib.gis.geos import Polygon
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponseForbidden, Http404
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from libs.data import merge
from libs.formatters import humanize_bytes
from libs.pdf_maps import create_event_map_pdf
from libs.sql import get_group_tree_count
from apps.core.helpers import (user_is_group_admin,
user_is_eligible_to_become_trusted_mapper)
from apps.core.decorators import group_request
from apps.core.models import Group
from apps.mail.views import notify_group_mapping_approved
from apps.users.models import Follow, TrustedMapper
from apps.users.forms import GroupSettingsForm
from apps.survey.models import Territory, Survey, Blockface
from apps.survey.layer_context import (get_context_for_territory_layer,
get_context_for_territory_admin_layer)
from apps.event.models import Event, EventRegistration
from apps.event.event_list import EventList
GROUP_EVENTS_ID = 'group-events'
GROUP_EDIT_EVENTS_TAB_ID = 'events'
def group_list_page(request):
# TODO: pagination
groups = Group.objects.filter(is_active=True).order_by('name')
group_ids = Follow.objects.filter(user_id=request.user.id) \
.values_list('group_id', flat=True)
user_is_following = [group.id in group_ids for group in groups]
group_infos = zip(groups, user_is_following)
return {
'groups': group_infos,
'groups_count': len(group_infos),
}
@group_request
def _group_events(request):
qs = Event.objects.filter(group=request.group, is_private=False)
user_can_edit_group = user_is_group_admin(request.user,
request.group)
extra_context = {'user_can_edit_group': user_can_edit_group,
'group_slug': request.group.slug}
return qs, extra_context
group_detail_events = EventList(
_group_events,
name="group_detail_events",
template_path='groups/partials/detail_event_list.html')
group_edit_events = EventList(
_group_events,
name="group_edit_events",
template_path='groups/partials/edit_event_list.html')
def group_detail(request):
user = request.user
group = request.group
if not user_is_group_admin(user, group) and not request.group.is_active:
raise Http404('Must be a group admin to view an inactive group')
event_list = (group_detail_events
.configure(chunk_size=2,
active_filter=EventList.Filters.CURRENT,
filterset_name=EventList.chronoFilters)
.as_context(request, group_slug=group.slug))
user_is_following = Follow.objects.filter(user_id=request.user.id,
group=group).exists()
show_mapper_request = user_is_eligible_to_become_trusted_mapper(user,
group)
follow_count = Follow.objects.filter(group=group).count()
tree_count = get_group_tree_count(group)
group_blocks = Territory.objects \
.filter(group=group) \
.values_list('blockface_id', flat=True)
group_blocks_count = group_blocks.count()
if group_blocks_count > 0:
completed_blocks = Survey.objects \
.filter(blockface_id__in=group_blocks) \
.distinct('blockface')
block_percent = "{:.1%}".format(
float(completed_blocks.count()) / float(group_blocks.count()))
else:
block_percent = "0.0%"
events_held = Event.objects.filter(group=group, ends_at__lt=now())
num_events_held = events_held.count()
num_event_attendees = EventRegistration.objects \
.filter(event__in=events_held) \
.filter(did_attend=True) \
.count()
return {
'group': group,
'event_list': event_list,
'user_is_following': user_is_following,
'edit_url': reverse('group_edit', kwargs={'group_slug': group.slug}),
'show_mapper_request': show_mapper_request,
'counts': {
'tree': tree_count,
'block': block_percent,
'event': num_events_held,
'attendees': num_event_attendees,
'follows': follow_count
},
'group_events_id': GROUP_EVENTS_ID,
'layer': get_context_for_territory_layer(request, request.group.id),
'territory_bounds': _group_territory_bounds(request.group),
'render_follow_button_without_count': request.POST.get(
'render_follow_button_without_count', False)
}
def redirect_to_group_detail(request):
return HttpResponseRedirect(
reverse('group_detail', kwargs={
'group_slug': request.group.slug
}))
def _group_territory_bounds(group):
blockfaces = Blockface.objects \
.filter(territory__group=group) \
.collect()
if blockfaces:
return list(blockfaces.extent)
else:
return None
def edit_group(request, form=None):
group = request.group
if not form:
form = GroupSettingsForm(instance=request.group, label_suffix='')
event_list = (group_edit_events
.configure(chunk_size=2,
active_filter=EventList.Filters.CURRENT,
filterset_name=EventList.chronoFilters)
.as_context(request, group_slug=group.slug))
pending_mappers = TrustedMapper.objects.filter(group=request.group,
is_approved__isnull=True)
all_mappers = TrustedMapper.objects.filter(group=request.group,
is_approved__isnull=False)
return {
'group': group,
'event_list': event_list,
'form': form,
'group_slug': group.slug,
'max_image_size': humanize_bytes(
settings.MAX_GROUP_IMAGE_SIZE_IN_BYTES, 0),
'pending_mappers': pending_mappers,
'all_mappers': all_mappers,
'group_edit_events_tab_id': GROUP_EDIT_EVENTS_TAB_ID,
}
def update_group_settings(request):
form = GroupSettingsForm(request.POST, request.FILES,
instance=request.group)
if form.is_valid():
form.save()
return HttpResponseRedirect(request.group.get_absolute_url())
else:
return edit_group(request, form=form)
def follow_group(request):
Follow.objects.get_or_create(user_id=request.user.id, group=request.group)
return group_detail(request)
def unfollow_group(request):
Follow.objects.filter(user_id=request.user.id, group=request.group) \
.delete()
return group_detail(request)
def start_group_map_print_job(request):
# TODO: implement
pass
def give_user_mapping_priveleges(request, username):
mapper_context = _grant_mapping_access(request.group, username,
is_approved=True)
mail_context = notify_group_mapping_approved(request, request.group,
username)
return merge(mapper_context, mail_context)
def remove_user_mapping_priveleges(request, username):
return _grant_mapping_access(request.group, username, is_approved=False)
def _grant_mapping_access(group, username, is_approved):
mapper, created = TrustedMapper.objects.update_or_create(
group=group,
user__username=username,
defaults={'is_approved': is_approved})
return {
'mapper': mapper
}
def request_mapper_status(request):
user, group = request.user, request.group
if not user_is_eligible_to_become_trusted_mapper(user, group):
return HttpResponseForbidden()
mapper, created = TrustedMapper.objects.update_or_create(
group=group, user=user)
return {
'success': True
}
def group_unmapped_territory_geojson(request, group_id):
# Get unmapped blockfaces
blockfaces = Blockface.objects.filter(is_available=True)
my_territory_q = Q(territory__group_id=group_id)
if request.body:
# Get potentially selectable blockfaces in polygon
# (those in my territory or unclaimed)
point_list = json.loads(request.body)
point_list.append(point_list[0]) # Close the polygon
polygon = Polygon((point_list))
no_reservations_q = \
Q(blockfacereservation__isnull=True) \
| Q(blockfacereservation__canceled_at__isnull=False) \
| Q(blockfacereservation__expires_at__lt=now())
nobodys_territory_q = Q(territory__group_id=None)
unclaimed_q = no_reservations_q & nobodys_territory_q
blockfaces = blockfaces \
.filter(geom__within=polygon) \
.filter(my_territory_q | unclaimed_q) \
.distinct()
# Return just blockface data
# (skipping expensive queries to make tiler URLs)
return _make_blockface_data_result(blockfaces)
else:
# Get all blockfaces in group's territory
blockfaces = blockfaces.filter(my_territory_q)
return _make_blockface_and_tiler_urls_result(
request, blockfaces, group_id)
def group_update_territory(request, group_id):
group = get_object_or_404(Group, id=group_id)
_update_territory(group, request)
# Recreate PDF maps to show updated group territory
_update_event_maps(request, group)
result_blockfaces = Blockface.objects.filter(territory__group=group)
return _make_blockface_and_tiler_urls_result(
request, result_blockfaces, group_id)
@transaction.atomic
def _update_territory(group, request):
new_block_ids = set([int(id) for id in json.loads(request.body)])
old_block_ids = set(Territory.objects
.filter(group=group)
.values_list('blockface_id', flat=True))
ids_to_add = new_block_ids - old_block_ids
ids_to_kill = old_block_ids - new_block_ids
# Make sure no unavailable or already-assigned blocks slipped in
filtered_ids_to_add = Blockface.objects \
.filter(id__in=ids_to_add) \
.filter(is_available=True) \
.filter(territory=None) \
.values_list('id', flat=True)
new_territory = [Territory(group=group, blockface_id=id)
for id in filtered_ids_to_add]
Territory.objects.bulk_create(new_territory)
Territory.objects \
.filter(blockface_id__in=ids_to_kill) \
.delete()
def _update_event_maps(request, group):
events = Event.objects \
.filter(group_id=group.id, begins_at__gt=now()) \
.select_related('group')
for event in events:
create_event_map_pdf(request, event)
def _make_blockface_and_tiler_urls_result(request, blockfaces, group_id):
result = {
'blockDataList': _make_blockface_data_result(blockfaces),
'tilerUrls': get_context_for_territory_admin_layer(request, group_id)
}
return result
def _make_blockface_data_result(blockfaces):
block_data_list = [{'id': bf.id, 'geojson': bf.geom.json}
for bf in blockfaces]
return block_data_list
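# A small illustrative note on group_unmapped_territory_geojson above (an
# addition, not part of the original views): the request body is a JSON list
# of coordinate pairs describing an open polygon; the view re-appends the
# first point to close the ring before building the GEOS Polygon. The
# coordinate values below are made up purely for illustration:
#
# point_list = [[-73.98, 40.75], [-73.97, 40.75], [-73.97, 40.76], [-73.98, 40.76]]
# point_list.append(point_list[0])  # close the ring -> 5 points
# polygon = Polygon(point_list)     # single exterior ring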
| {
"content_hash": "245a3f944cd4eac948a49263f01aee37",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 78,
"avg_line_length": 34.25595238095238,
"alnum_prop": 0.635881841876629,
"repo_name": "RickMohr/nyc-trees",
"id": "0430d00d3c50a470409f54b8f021ae97848c2ee7",
"size": "11534",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/nyc_trees/apps/users/views/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "171372"
},
{
"name": "CartoCSS",
"bytes": "878"
},
{
"name": "HTML",
"bytes": "157969"
},
{
"name": "JavaScript",
"bytes": "286316"
},
{
"name": "Makefile",
"bytes": "1524"
},
{
"name": "PLpgSQL",
"bytes": "3210"
},
{
"name": "Python",
"bytes": "404021"
},
{
"name": "Shell",
"bytes": "23399"
}
],
"symlink_target": ""
} |
import warnings
from .._tqdm import TqdmExperimentalWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=TqdmExperimentalWarning)
from ..autonotebook import tqdm, trange
__all__ = ["tqdm", "trange"]
| {
"content_hash": "2595198968bc629b8805b675209099b4",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 69,
"avg_line_length": 39.166666666666664,
"alnum_prop": 0.7574468085106383,
"repo_name": "huguesv/PTVS",
"id": "b475793ca118fd166b54a96925b2fd5cbd33329c",
"size": "235",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/tqdm/auto/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12464429"
},
{
"name": "C++",
"bytes": "211838"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "25220"
},
{
"name": "Python",
"bytes": "913395"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
} |
'''
$ sudo ip netns add ns1
$ sudo ip link set dev veth1 netns ns1
$ sudo ip netns exec ns1 bash
# python recv.py
'''
import pcap
from ryu.lib.packet import packet, icmpv6
for ts, data in pcap.pcap():
pkt = packet.Packet(data)
i = pkt.get_protocol(icmpv6.icmpv6)
m = i.data
print ts, i | {
"content_hash": "0c00b3bf184a621e4ccccd4f0715ce78",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 41,
"avg_line_length": 21.571428571428573,
"alnum_prop": 0.6721854304635762,
"repo_name": "ntts-clo/mld-ryu",
"id": "0f3535f0e0a64051e8dc59d50797f72ec285d4fd",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mld/sample/recv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Erlang",
"bytes": "870216"
},
{
"name": "Python",
"bytes": "4898989"
},
{
"name": "Shell",
"bytes": "14336"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_("Name of User"), blank=True, max_length=255)
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
| {
"content_hash": "bdc51417b5bd2ffdfd3b70bd6c4637ed",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 74,
"avg_line_length": 30.294117647058822,
"alnum_prop": 0.7126213592233009,
"repo_name": "evanepio/dotmanca",
"id": "5d814f1e24bec77305413e01d4444a470099bc95",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dotmanca/users/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2704"
},
{
"name": "Dockerfile",
"bytes": "1874"
},
{
"name": "HTML",
"bytes": "12635"
},
{
"name": "Makefile",
"bytes": "192"
},
{
"name": "Python",
"bytes": "83822"
},
{
"name": "Shell",
"bytes": "860"
}
],
"symlink_target": ""
} |
from oslo_log import versionutils
from oslo_policy import policy
from neutron.conf.policies import base
DEPRECATED_REASON = """
The network segment range API now supports project scope and default roles.
"""
COLLECTION_PATH = '/network_segment_ranges'
RESOURCE_PATH = '/network_segment_ranges/{id}'
rules = [
policy.DocumentedRuleDefault(
name='create_network_segment_range',
check_str=base.ADMIN,
scope_types=['project'],
description='Create a network segment range',
operations=[
{
'method': 'POST',
'path': COLLECTION_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='create_network_segment_range',
check_str=base.RULE_ADMIN_ONLY,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='get_network_segment_range',
check_str=base.ADMIN,
scope_types=['project'],
description='Get a network segment range',
operations=[
{
'method': 'GET',
'path': COLLECTION_PATH,
},
{
'method': 'GET',
'path': RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='get_network_segment_range',
check_str=base.RULE_ADMIN_ONLY,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='update_network_segment_range',
check_str=base.ADMIN,
scope_types=['project'],
description='Update a network segment range',
operations=[
{
'method': 'PUT',
'path': RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='update_network_segment_range',
check_str=base.RULE_ADMIN_ONLY,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='delete_network_segment_range',
check_str=base.ADMIN,
scope_types=['project'],
description='Delete a network segment range',
operations=[
{
'method': 'DELETE',
'path': RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='delete_network_segment_range',
check_str=base.RULE_ADMIN_ONLY,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
]
def list_rules():
return rules
| {
"content_hash": "9964f72dfa6a48a2f4b83457e3f9b5f0",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 75,
"avg_line_length": 30.78021978021978,
"alnum_prop": 0.5680114244912531,
"repo_name": "openstack/neutron",
"id": "b0e07640f76bb8e3d0cb689d467dc133ffc33402",
"size": "3418",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/conf/policies/network_segment_range.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import collections
import six
from collections import OrderedDict
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from enum import Enum
from sentry.utils.dates import to_datetime, to_timestamp
from sentry.utils.services import Service
ONE_MINUTE = 60
ONE_HOUR = ONE_MINUTE * 60
ONE_DAY = ONE_HOUR * 24
class TSDBModel(Enum):
internal = 0
# number of events seen specific to grouping
project = 1
group = 4
release = 7
# the number of events sent to the server
project_total_received = 100
# the number of events rejected due to rate limiting
project_total_rejected = 101
# the number of events blocked due to being blacklisted
project_total_blacklisted = 104
# the number of events forwarded to third party processors (data forwarding)
project_total_forwarded = 105
# the number of events sent to the server
organization_total_received = 200
# the number of events rejected due to rate limiting
organization_total_rejected = 201
# the number of events blocked due to being blacklisted
organization_total_blacklisted = 202
# distinct count of users that have been affected by an event in a group
users_affected_by_group = 300
# distinct count of users that have been affected by an event in a project
users_affected_by_project = 301
# frequent_organization_received_by_system = 400
# frequent_organization_rejected_by_system = 401
# frequent_organization_blacklisted_by_system = 402
# frequent_values_by_issue_tag = 405
# number of events seen for a project, by organization
frequent_projects_by_organization = 403
# number of issues seen for a project, by project
frequent_issues_by_project = 404
# number of events seen for a release, by issue
# frequent_releases_by_group = 406 # DEPRECATED
# number of events seen for a release, by issue
frequent_releases_by_group = 407
# number of events seen for an environment, by issue
frequent_environments_by_group = 408
# the number of events sent to the server
key_total_received = 500
# the number of events rejected due to rate limiting
key_total_rejected = 501
# the number of events blocked due to being blacklisted
key_total_blacklisted = 502
# the number of events filtered by ip
project_total_received_ip_address = 601
# the number of events filtered by release
project_total_received_release_version = 602
# the number of events filtered by error message
project_total_received_error_message = 603
# the number of events filtered by browser extension
project_total_received_browser_extensions = 604
# the number of events filtered by legacy browser
project_total_received_legacy_browsers = 605
# the number of events filtered by localhost
project_total_received_localhost = 606
# the number of events filtered by web crawlers
project_total_received_web_crawlers = 607
# the number of events filtered by invalid csp
project_total_received_invalid_csp = 608
# the number of events filtered by invalid origin
project_total_received_cors = 609
# the number of events filtered because their group was discarded
project_total_received_discarded = 610
servicehook_fired = 700
class BaseTSDB(Service):
__read_methods__ = frozenset(
[
"get_range",
"get_sums",
"get_distinct_counts_series",
"get_distinct_counts_totals",
"get_distinct_counts_union",
"get_most_frequent",
"get_most_frequent_series",
"get_frequency_series",
"get_frequency_totals",
]
)
__write_methods__ = frozenset(
[
"incr",
"incr_multi",
"merge",
"delete",
"record",
"record_multi",
"merge_distinct_counts",
"delete_distinct_counts",
"record_frequency_multi",
"merge_frequencies",
"delete_frequencies",
"flush",
]
)
__all__ = (
frozenset(
[
"get_earliest_timestamp",
"get_optimal_rollup",
"get_optimal_rollup_series",
"get_rollups",
"make_series",
"models",
"models_with_environment_support",
"normalize_to_epoch",
"rollup",
]
)
| __write_methods__
| __read_methods__
)
models = TSDBModel
models_with_environment_support = frozenset(
[
models.project,
models.group,
models.release,
models.users_affected_by_group,
models.users_affected_by_project,
]
)
def __init__(self, rollups=None, legacy_rollups=None, **options):
if rollups is None:
rollups = settings.SENTRY_TSDB_ROLLUPS
self.rollups = OrderedDict(rollups)
# The ``SENTRY_TSDB_LEGACY_ROLLUPS`` setting should be used to store
# previous rollup configuration values after they are modified in
# ``SENTRY_TSDB_ROLLUPS``. The values can be removed after the new
# rollup period is full of new data.
if legacy_rollups is None:
legacy_rollups = getattr(settings, "SENTRY_TSDB_LEGACY_ROLLUPS", {})
self.__legacy_rollups = legacy_rollups
def validate_arguments(self, models, environment_ids):
if any(e is not None for e in environment_ids):
unsupported_models = set(models) - self.models_with_environment_support
if unsupported_models:
raise ValueError("not all models support environment parameters")
def get_rollups(self):
return self.rollups
def normalize_to_epoch(self, timestamp, seconds):
"""
        Given a ``timestamp`` (datetime object), normalize it to an epoch timestamp.
        For example, if the rollup is minutes, the resulting timestamp would have
        the seconds and microseconds rounded down.
"""
epoch = int(to_timestamp(timestamp))
return epoch - (epoch % seconds)
def normalize_ts_to_epoch(self, epoch, seconds):
"""
        Given an ``epoch``, normalize it to an epoch timestamp.
"""
return epoch - (epoch % seconds)
def normalize_to_rollup(self, timestamp, seconds):
"""
        Given a ``timestamp`` (datetime object), normalize it to an epoch rollup.
"""
epoch = int(to_timestamp(timestamp))
return int(epoch / seconds)
def normalize_ts_to_rollup(self, epoch, seconds):
"""
        Given an ``epoch``, normalize it to an epoch rollup.
"""
return int(epoch / seconds)
def get_optimal_rollup(self, start_timestamp, end_timestamp):
"""
Identify the lowest granularity rollup available within the given time
range.
"""
num_seconds = int(to_timestamp(end_timestamp)) - int(to_timestamp(start_timestamp))
# This loop attempts to find the smallest possible rollup that will
# contain both the start and end timestamps. ``self.rollups`` is
# ordered from the highest resolution (smallest interval) to lowest
        # resolution (largest interval).
# XXX: There is a bug here, since this function assumes that the end
# timestamp is always equal to or greater than the current time. If the
# time range is shifted far enough into the past (e.g. a 30 second
        # window, retrieved several days after its occurrence), this can
# return a rollup that has already been evicted due to TTL, even if a
# lower resolution representation of the range exists.
for rollup, samples in six.iteritems(self.rollups):
if rollup * samples >= num_seconds:
return rollup
# If nothing actually matches the requested range, just return the
# lowest resolution interval.
return list(self.rollups)[-1]
def get_optimal_rollup_series(self, start, end=None, rollup=None):
if end is None:
end = timezone.now()
if rollup is None:
rollup = self.get_optimal_rollup(start, end)
# This attempts to create a range with a duration as close as possible
# to the requested interval using the requested (or inferred) rollup
# resolution. This result always includes the ``end`` timestamp, but
# may not include the ``start`` timestamp.
series = []
timestamp = end
while timestamp >= start:
series.append(self.normalize_to_epoch(timestamp, rollup))
timestamp = timestamp - timedelta(seconds=rollup)
return rollup, sorted(series)
def get_active_series(self, start=None, end=None, timestamp=None):
rollups = {}
for rollup, samples in self.rollups.items():
_, series = self.get_optimal_rollup_series(
start
if start is not None
else to_datetime(self.get_earliest_timestamp(rollup, timestamp=timestamp)),
end,
rollup=rollup,
)
rollups[rollup] = map(to_datetime, series)
return rollups
def make_series(self, default, start, end=None, rollup=None):
f = default if isinstance(default, collections.Callable) else lambda timestamp: default
return [
(timestamp, f(timestamp))
for timestamp in self.get_optimal_rollup_series(start, end, rollup)[1]
]
def calculate_expiry(self, rollup, samples, timestamp):
"""
Calculate the expiration time for a rollup.
:param rollup: rollup interval (in seconds)
:param samples: number of samples to maintain
:param timestamp: datetime used to calculate the rollup epoch
"""
epoch = self.normalize_to_epoch(timestamp, rollup)
return epoch + (rollup * samples)
def get_earliest_timestamp(self, rollup, timestamp=None):
"""
Calculate the earliest available timestamp for a rollup.
"""
if timestamp is None:
timestamp = timezone.now()
samples = self.__legacy_rollups.get(rollup)
if samples is None:
samples = self.rollups[rollup]
lifespan = timedelta(seconds=rollup * (samples - 1))
return self.normalize_to_epoch(timestamp - lifespan, rollup)
def incr(self, model, key, timestamp=None, count=1, environment_id=None):
"""
Increment project ID=1:
>>> incr(TimeSeriesModel.project, 1)
"""
raise NotImplementedError
def incr_multi(self, items, timestamp=None, count=1, environment_id=None):
"""
Increment project ID=1 and group ID=5:
>>> incr_multi([(TimeSeriesModel.project, 1), (TimeSeriesModel.group, 5)])
"""
for model, key in items:
self.incr(model, key, timestamp, count, environment_id=environment_id)
def merge(self, model, destination, sources, timestamp=None, environment_ids=None):
"""
Transfer all counters from the source keys to the destination key.
"""
raise NotImplementedError
def delete(self, models, keys, start=None, end=None, timestamp=None, environment_ids=None):
"""
Delete all counters.
"""
raise NotImplementedError
def get_range(self, model, keys, start, end, rollup=None, environment_ids=None):
"""
To get a range of data for group ID=[1, 2, 3]:
Returns a mapping of key => [(timestamp, count), ...].
>>> now = timezone.now()
>>> get_range([TSDBModel.group], [1, 2, 3],
>>> start=now - timedelta(days=1),
>>> end=now)
"""
raise NotImplementedError
def get_sums(self, model, keys, start, end, rollup=None, environment_id=None):
range_set = self.get_range(
model,
keys,
start,
end,
rollup,
environment_ids=[environment_id] if environment_id is not None else None,
)
sum_set = dict(
(key, sum(p for _, p in points)) for (key, points) in six.iteritems(range_set)
)
return sum_set
def rollup(self, values, rollup):
"""
Given a set of values (as returned from ``get_range``), roll them up
using the ``rollup`` time (in seconds).
"""
normalize_ts_to_epoch = self.normalize_ts_to_epoch
result = {}
for key, points in six.iteritems(values):
result[key] = []
last_new_ts = None
for (ts, count) in points:
new_ts = normalize_ts_to_epoch(ts, rollup)
if new_ts == last_new_ts:
result[key][-1][1] += count
else:
result[key].append([new_ts, count])
last_new_ts = new_ts
return result
def record(self, model, key, values, timestamp=None, environment_id=None):
"""
        Record occurrences of items in a single distinct counter.
"""
raise NotImplementedError
def record_multi(self, items, timestamp=None, environment_id=None):
"""
        Record occurrences of items in multiple distinct counters.
"""
for model, key, values in items:
self.record(model, key, values, timestamp, environment_id=environment_id)
def get_distinct_counts_series(
self, model, keys, start, end=None, rollup=None, environment_id=None
):
"""
Fetch counts of distinct items for each rollup interval within the range.
"""
raise NotImplementedError
def get_distinct_counts_totals(
self, model, keys, start, end=None, rollup=None, environment_id=None
):
"""
Count distinct items during a time range.
"""
raise NotImplementedError
def get_distinct_counts_union(
self, model, keys, start, end=None, rollup=None, environment_id=None
):
"""
Count the total number of distinct items across multiple counters
during a time range.
"""
raise NotImplementedError
def merge_distinct_counts(
self, model, destination, sources, timestamp=None, environment_ids=None
):
"""
Transfer all distinct counters from the source keys to the
destination key.
"""
raise NotImplementedError
def delete_distinct_counts(
self, models, keys, start=None, end=None, timestamp=None, environment_ids=None
):
"""
Delete all distinct counters.
"""
raise NotImplementedError
def record_frequency_multi(self, requests, timestamp=None, environment_id=None):
"""
Record items in a frequency table.
Metrics to increment should be passed as sequence pairs, using this
structure: ``(model, {key: {item: score, ...}, ...})``
"""
raise NotImplementedError
def get_most_frequent(
self, model, keys, start, end=None, rollup=None, limit=None, environment_id=None
):
"""
Retrieve the most frequently seen items in a frequency table.
Results are returned as a mapping, where the key is the key requested
and the value is a list of ``(member, score)`` tuples, ordered by the
highest (most frequent) to lowest (least frequent) score. The maximum
number of items returned is ``index capacity * rollup intervals`` if no
``limit`` is provided.
"""
raise NotImplementedError
def get_most_frequent_series(
self, model, keys, start, end=None, rollup=None, limit=None, environment_id=None
):
"""
Retrieve the most frequently seen items in a frequency table for each
interval in a series. (This is in contrast with ``get_most_frequent``,
which returns the most frequent items seen over the entire requested
range.)
Results are returned as a mapping, where the key is the key requested
and the value is a list of ``(timestamp, {item: score, ...})`` pairs
over the series. The maximum number of items returned for each interval
is the index capacity if no ``limit`` is provided.
"""
raise NotImplementedError
def get_frequency_series(self, model, items, start, end=None, rollup=None, environment_id=None):
"""
Retrieve the frequency of known items in a table over time.
The items requested should be passed as a mapping, where the key is the
metric key, and the value is a sequence of members to retrieve scores
for.
Results are returned as a mapping, where the key is the key requested
and the value is a list of ``(timestamp, {item: score, ...})`` pairs
over the series.
"""
raise NotImplementedError
def get_frequency_totals(self, model, items, start, end=None, rollup=None, environment_id=None):
"""
Retrieve the total frequency of known items in a table over time.
The items requested should be passed as a mapping, where the key is the
metric key, and the value is a sequence of members to retrieve scores
for.
Results are returned as a mapping, where the key is the key requested
and the value is a mapping of ``{item: score, ...}`` containing the
total score of items over the interval.
"""
raise NotImplementedError
def merge_frequencies(self, model, destination, sources, timestamp=None, environment_ids=None):
"""
Transfer all frequency tables from the source keys to the destination
key.
"""
raise NotImplementedError
def delete_frequencies(
self, models, keys, start=None, end=None, timestamp=None, environment_ids=None
):
"""
Delete all frequency tables.
"""
raise NotImplementedError
def flush(self):
"""
Delete all data.
"""
raise NotImplementedError
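# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of the rollup helpers above. It assumes the Django settings
# required by this module's imports are configured, and the rollup table
# (10 s kept for 360 samples, 1 h kept for a week) is purely hypothetical.
if __name__ == "__main__":
    tsdb = BaseTSDB(rollups=[(10, 360), (3600, 24 * 7)])
    now = timezone.now()
    rollup, series = tsdb.get_optimal_rollup_series(now - timedelta(minutes=30), now)
    print("rollup=%ds, %d points in series" % (rollup, len(series)))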
| {
"content_hash": "8ee2201c6e87370134c75c90148646d9",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 100,
"avg_line_length": 35.58139534883721,
"alnum_prop": 0.6168300653594772,
"repo_name": "mvaled/sentry",
"id": "f0550962636a6d091bf6466ecaaa24bd73a3aab4",
"size": "18360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/tsdb/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
import requests, bs4, json, threading, getpass
from collections import Counter
def getPayload():
payload = {
'user[remember_me]': 0
}
# TODO: get username and password
username = input('Input your username: ')
password = getpass.getpass('Input your password: ')
payload['user[handle]'] = username
payload['user[password]'] = password
return payload
def getCount(startPage, endPage):
with requests.Session() as s:
p = s.post('http://www.patest.cn/users/sign_in', data=payload)
# print(p.text)
for page in range(startPage, endPage + 1):
print('getting page %d...' % page)
url = 'http://www.patest.cn/contests/pat-b-practise/submissions?page=%d' % page
res = s.get(url)
            try:
                res.raise_for_status()
            except requests.HTTPError as exc:
                if exc.response.status_code == 404:
                    print('page {} encountered 404'.format(page))
                    continue  # skip missing pages instead of parsing the error body
                raise
            soup = bs4.BeautifulSoup(res.text, 'html.parser')
table = soup.select('table')[0]
for row in table.find_all('tr')[1:]:
cells = row.find_all('td')
counter.update([cells[4].text])
if __name__ == '__main__':
counter = Counter()
payload = getPayload()
# TODO: multithreading
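    # Each thread scrapes a disjoint 100-page slice; concurrent Counter.update
    # calls are assumed safe enough here because CPython's GIL serializes
    # them (an assumption, not a documented guarantee).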
getThreads = []
for i in range(0, 1000, 100):
getThread = threading.Thread(target=getCount, args=(i + 1, i + 100))
getThreads.append(getThread)
getThread.start()
for thread in getThreads:
thread.join()
# TODO: print the result
print('\n------------------------------------------------------------------------')
# print(json.dumps(counter))
for lang in counter.keys():
print('%s : %d' % (lang, counter[lang]))
# print(counter)
| {
"content_hash": "beee45a898ccfaefbb2a4fcfc28fbaa7",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 91,
"avg_line_length": 30.483870967741936,
"alnum_prop": 0.5365079365079365,
"repo_name": "endvroy/PATlanguages",
"id": "6ff22029ededf6428c78ae7acd882e19f08951aa",
"size": "1890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3461"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/general/shared_distant_ship_controller_imperial.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "b38abcfaf623be1482e6a85b3ebe4108",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 96,
"avg_line_length": 25,
"alnum_prop": 0.7076923076923077,
"repo_name": "obi-two/Rebelion",
"id": "0feb2ecb673c10a2977b8ffc92ed4b737bcce2e0",
"size": "470",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/structure/general/shared_distant_ship_controller_imperial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import proto # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import flow
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dialogflow.cx.v3beta1",
manifest={
"CreateVersionOperationMetadata",
"Version",
"ListVersionsRequest",
"ListVersionsResponse",
"GetVersionRequest",
"CreateVersionRequest",
"UpdateVersionRequest",
"DeleteVersionRequest",
"LoadVersionRequest",
"CompareVersionsRequest",
"CompareVersionsResponse",
},
)
class CreateVersionOperationMetadata(proto.Message):
r"""Metadata associated with the long running operation for
[Versions.CreateVersion][google.cloud.dialogflow.cx.v3beta1.Versions.CreateVersion].
Attributes:
version (str):
Name of the created version. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
version = proto.Field(
proto.STRING,
number=1,
)
class Version(proto.Message):
r"""Represents a version of a flow.
Attributes:
name (str):
Format: projects/<Project
ID>/locations/<Location ID>/agents/<Agent
ID>/flows/<Flow ID>/versions/<Version ID>.
Version ID is a self-increasing number generated
by Dialogflow upon version creation.
display_name (str):
Required. The human-readable name of the
version. Limit of 64 characters.
description (str):
The description of the version. The maximum
length is 500 characters. If exceeded, the
request is rejected.
nlu_settings (google.cloud.dialogflowcx_v3beta1.types.NluSettings):
Output only. The NLU settings of the flow at
version creation.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Create time of the version.
state (google.cloud.dialogflowcx_v3beta1.types.Version.State):
Output only. The state of this version. This
field is read-only and cannot be set by create
and update methods.
"""
class State(proto.Enum):
r"""The state of the version."""
STATE_UNSPECIFIED = 0
RUNNING = 1
SUCCEEDED = 2
FAILED = 3
name = proto.Field(
proto.STRING,
number=1,
)
display_name = proto.Field(
proto.STRING,
number=2,
)
description = proto.Field(
proto.STRING,
number=3,
)
nlu_settings = proto.Field(
proto.MESSAGE,
number=4,
message=flow.NluSettings,
)
create_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
state = proto.Field(
proto.ENUM,
number=6,
enum=State,
)
class ListVersionsRequest(proto.Message):
r"""The request message for
[Versions.ListVersions][google.cloud.dialogflow.cx.v3beta1.Versions.ListVersions].
Attributes:
parent (str):
Required. The
[Flow][google.cloud.dialogflow.cx.v3beta1.Flow] to list all
versions for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
page_size (int):
The maximum number of items to return in a
single page. By default 20 and at most 100.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListVersionsResponse(proto.Message):
r"""The response message for
[Versions.ListVersions][google.cloud.dialogflow.cx.v3beta1.Versions.ListVersions].
Attributes:
versions (Sequence[google.cloud.dialogflowcx_v3beta1.types.Version]):
A list of versions. There will be a maximum number of items
returned based on the page_size field in the request. The
list may in some cases be empty or contain fewer entries
than page_size even if this isn't the last page.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
versions = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="Version",
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class GetVersionRequest(proto.Message):
r"""The request message for
[Versions.GetVersion][google.cloud.dialogflow.cx.v3beta1.Versions.GetVersion].
Attributes:
name (str):
Required. The name of the
[Version][google.cloud.dialogflow.cx.v3beta1.Version].
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class CreateVersionRequest(proto.Message):
r"""The request message for
[Versions.CreateVersion][google.cloud.dialogflow.cx.v3beta1.Versions.CreateVersion].
Attributes:
parent (str):
Required. The
            [Flow][google.cloud.dialogflow.cx.v3beta1.Flow] to create a
[Version][google.cloud.dialogflow.cx.v3beta1.Version] for.
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
version (google.cloud.dialogflowcx_v3beta1.types.Version):
Required. The version to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
version = proto.Field(
proto.MESSAGE,
number=2,
message="Version",
)
class UpdateVersionRequest(proto.Message):
r"""The request message for
[Versions.UpdateVersion][google.cloud.dialogflow.cx.v3beta1.Versions.UpdateVersion].
Attributes:
version (google.cloud.dialogflowcx_v3beta1.types.Version):
Required. The version to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to control which fields get updated.
Currently only ``description`` and ``display_name`` can be
updated.
"""
version = proto.Field(
proto.MESSAGE,
number=1,
message="Version",
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class DeleteVersionRequest(proto.Message):
r"""The request message for
[Versions.DeleteVersion][google.cloud.dialogflow.cx.v3beta1.Versions.DeleteVersion].
Attributes:
name (str):
Required. The name of the
[Version][google.cloud.dialogflow.cx.v3beta1.Version] to
delete. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class LoadVersionRequest(proto.Message):
r"""The request message for
[Versions.LoadVersion][google.cloud.dialogflow.cx.v3beta1.Versions.LoadVersion].
Attributes:
name (str):
Required. The
[Version][google.cloud.dialogflow.cx.v3beta1.Version] to be
loaded to draft flow. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
allow_override_agent_resources (bool):
This field is used to prevent accidental overwrite of other
agent resources, which can potentially impact other flow's
behavior. If ``allow_override_agent_resources`` is false,
conflicted agent-level resources will not be overridden
(i.e. intents, entities, webhooks).
"""
name = proto.Field(
proto.STRING,
number=1,
)
allow_override_agent_resources = proto.Field(
proto.BOOL,
number=2,
)
class CompareVersionsRequest(proto.Message):
r"""The request message for
[Versions.CompareVersions][google.cloud.dialogflow.cx.v3beta1.Versions.CompareVersions].
Attributes:
base_version (str):
Required. Name of the base flow version to compare with the
target version. Use version ID ``0`` to indicate the draft
version of the specified flow.
Format:
``projects/<Project ID>/locations/<Location ID>/agents/ <Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
target_version (str):
Required. Name of the target flow version to compare with
the base version. Use version ID ``0`` to indicate the draft
version of the specified flow. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
language_code (str):
The language to compare the flow versions for.
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
"""
base_version = proto.Field(
proto.STRING,
number=1,
)
target_version = proto.Field(
proto.STRING,
number=2,
)
language_code = proto.Field(
proto.STRING,
number=3,
)
class CompareVersionsResponse(proto.Message):
r"""The response message for
[Versions.CompareVersions][google.cloud.dialogflow.cx.v3beta1.Versions.CompareVersions].
Attributes:
base_version_content_json (str):
JSON representation of the base version
content.
target_version_content_json (str):
JSON representation of the target version
content.
compare_time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp when the two version compares.
"""
base_version_content_json = proto.Field(
proto.STRING,
number=1,
)
target_version_content_json = proto.Field(
proto.STRING,
number=2,
)
compare_time = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
__all__ = tuple(sorted(__protobuf__.manifest))
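# --- Illustrative usage (not part of the original module) ---
# A small sketch of constructing these messages locally; the resource name
# below is a placeholder, not a real agent path.
if __name__ == "__main__":
    request = ListVersionsRequest(
        parent="projects/my-project/locations/global/agents/my-agent/flows/my-flow",
        page_size=20,
    )
    version = Version(display_name="v1", description="Illustrative version")
    print(request.parent)
    print(version.display_name)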
| {
"content_hash": "76ab531a71d894fe6b0fdd0d2560f1bf",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 119,
"avg_line_length": 30.70985915492958,
"alnum_prop": 0.6147495872317006,
"repo_name": "googleapis/python-dialogflow-cx",
"id": "415b9b2b73b207f2b29cc004b649081111e261f8",
"size": "11502",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/dialogflowcx_v3beta1/types/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "10904903"
},
{
"name": "Shell",
"bytes": "30681"
}
],
"symlink_target": ""
} |
"""The tests for the counter component."""
# pylint: disable=protected-access
import asyncio
import unittest
import logging
from homeassistant.core import CoreState, State
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.components.counter import (
DOMAIN, decrement, increment, reset, CONF_INITIAL, CONF_STEP, CONF_NAME,
CONF_ICON)
from homeassistant.const import (ATTR_ICON, ATTR_FRIENDLY_NAME)
from tests.common import (get_test_home_assistant, mock_restore_cache)
_LOGGER = logging.getLogger(__name__)
class TestCounter(unittest.TestCase):
"""Test the counter component."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_config(self):
"""Test config."""
invalid_configs = [
None,
1,
{},
{'name with space': None},
]
for cfg in invalid_configs:
self.assertFalse(
setup_component(self.hass, DOMAIN, {DOMAIN: cfg}))
def test_methods(self):
"""Test increment, decrement, and reset methods."""
config = {
DOMAIN: {
'test_1': {},
}
}
assert setup_component(self.hass, 'counter', config)
entity_id = 'counter.test_1'
state = self.hass.states.get(entity_id)
self.assertEqual(0, int(state.state))
increment(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(1, int(state.state))
increment(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(2, int(state.state))
decrement(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(1, int(state.state))
reset(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(0, int(state.state))
def test_methods_with_config(self):
"""Test increment, decrement, and reset methods with configuration."""
config = {
DOMAIN: {
'test': {
CONF_NAME: 'Hello World',
CONF_INITIAL: 10,
CONF_STEP: 5,
}
}
}
assert setup_component(self.hass, 'counter', config)
entity_id = 'counter.test'
state = self.hass.states.get(entity_id)
self.assertEqual(10, int(state.state))
increment(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(15, int(state.state))
increment(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(20, int(state.state))
decrement(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(15, int(state.state))
def test_config_options(self):
"""Test configuration options."""
count_start = len(self.hass.states.entity_ids())
_LOGGER.debug('ENTITIES @ start: %s', self.hass.states.entity_ids())
config = {
DOMAIN: {
'test_1': {},
'test_2': {
CONF_NAME: 'Hello World',
CONF_ICON: 'mdi:work',
CONF_INITIAL: 10,
CONF_STEP: 5,
}
}
}
assert setup_component(self.hass, 'counter', config)
self.hass.block_till_done()
_LOGGER.debug('ENTITIES: %s', self.hass.states.entity_ids())
self.assertEqual(count_start + 2, len(self.hass.states.entity_ids()))
self.hass.block_till_done()
state_1 = self.hass.states.get('counter.test_1')
state_2 = self.hass.states.get('counter.test_2')
self.assertIsNotNone(state_1)
self.assertIsNotNone(state_2)
self.assertEqual(0, int(state_1.state))
self.assertNotIn(ATTR_ICON, state_1.attributes)
self.assertNotIn(ATTR_FRIENDLY_NAME, state_1.attributes)
self.assertEqual(10, int(state_2.state))
self.assertEqual('Hello World',
state_2.attributes.get(ATTR_FRIENDLY_NAME))
self.assertEqual('mdi:work', state_2.attributes.get(ATTR_ICON))
@asyncio.coroutine
def test_initial_state_overrules_restore_state(hass):
"""Ensure states are restored on startup."""
mock_restore_cache(hass, (
State('counter.test1', '11'),
State('counter.test2', '-22'),
))
hass.state = CoreState.starting
yield from async_setup_component(hass, DOMAIN, {
DOMAIN: {
'test1': {},
'test2': {
CONF_INITIAL: 10,
},
}})
state = hass.states.get('counter.test1')
assert state
assert int(state.state) == 0
state = hass.states.get('counter.test2')
assert state
assert int(state.state) == 10
@asyncio.coroutine
def test_no_initial_state_and_no_restore_state(hass):
"""Ensure that entity is create without initial and restore feature."""
hass.state = CoreState.starting
yield from async_setup_component(hass, DOMAIN, {
DOMAIN: {
'test1': {
CONF_STEP: 5,
}
}})
state = hass.states.get('counter.test1')
assert state
assert int(state.state) == 0
| {
"content_hash": "74dfbf613e5cd5e9d341086e32b98333",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 78,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.5783173734610123,
"repo_name": "LinuxChristian/home-assistant",
"id": "8dc04f0e76ab7b910d7cc8309ff0c8884d579b85",
"size": "5848",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/test_counter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13788"
},
{
"name": "HTML",
"bytes": "1733802"
},
{
"name": "JavaScript",
"bytes": "15192"
},
{
"name": "Python",
"bytes": "7415265"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15154"
}
],
"symlink_target": ""
} |
"""
opentrons_shared_data: a python package wrapping json config definitions
for the opentrons stack
This package should never be installed on its own, only as a dependency of
the main opentrons package
"""
import os
import json
from .load import get_shared_data_root, load_shared_data
HERE = os.path.abspath(os.path.dirname(__file__))
try:
with open(os.path.join(HERE, 'package.json')) as pkg:
package_json = json.load(pkg)
__version__ = package_json.get('version')
except (FileNotFoundError, OSError):
__version__ = 'unknown'
__all__ = ['__version__', 'get_shared_data_root', 'load_shared_data']
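# --- Illustrative usage (not part of the original module) ---
# A trivial smoke check: print the version resolved from package.json, or
# 'unknown' when that file is absent.
if __name__ == "__main__":
    print(__version__)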
| {
"content_hash": "82688292843fc35d38e3abb59a1fdea9",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 26.208333333333332,
"alnum_prop": 0.6963434022257552,
"repo_name": "OpenTrons/opentrons_sdk",
"id": "850d862fb69d19d131f1b4583c62674eebe58a2b",
"size": "629",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "shared-data/python/opentrons_shared_data/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "200255"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "django-organizations"
copyright = "2012-2021, Ben Lopatin and contributors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "2.0"
# The full version, including alpha/beta/rc tags.
release = "2.0.2"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'furo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "django-organizationsdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"django-organizations.tex",
"django-organizations Documentation",
"Ben Lopatin",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"django-organizations",
"django-organizations Documentation",
["Ben Lopatin"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"django-organizations",
"django-organizations Documentation",
"Ben Lopatin",
"django-organizations",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
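# Illustrative note (not part of the original configuration): assuming Sphinx
# and the furo theme are installed, the HTML docs can typically be built from
# the repository root with:
#
#     sphinx-build -b html docs docs/_build/html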
| {
"content_hash": "47f4e4b961b0eb74f141cb19e9dec6de",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 80,
"avg_line_length": 31.43621399176955,
"alnum_prop": 0.6849064013614348,
"repo_name": "bennylope/django-organizations",
"id": "e80b2440b174e250c261b0b1f8b03f851a71a84d",
"size": "8070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "5416"
},
{
"name": "Makefile",
"bytes": "2171"
},
{
"name": "Python",
"bytes": "204918"
}
],
"symlink_target": ""
} |
"""engine.SCons.Tool.aixf77
Tool-specific initialization for IBM Visual Age f77 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixf77.py 2014/07/05 09:42:21 garyo"
import os.path
#import SCons.Platform.aix
import f77
# It would be good to look for the AIX F77 package the same way we're now
# looking for the C and C++ packages. This should be as easy as supplying
# the correct package names in the following list and uncommenting the
# SCons.Platform.aix_get_xlc() call the in the function below.
packages = []
def get_xlf77(env):
xlf77 = env.get('F77', 'xlf77')
xlf77_r = env.get('SHF77', 'xlf77_r')
#return SCons.Platform.aix.get_xlc(env, xlf77, xlf77_r, packages)
return (None, xlf77, xlf77_r, None)
def generate(env):
"""
Add Builders and construction variables for the Visual Age FORTRAN
compiler to an Environment.
"""
path, _f77, _shf77, version = get_xlf77(env)
if path:
_f77 = os.path.join(path, _f77)
_shf77 = os.path.join(path, _shf77)
f77.generate(env)
env['F77'] = _f77
env['SHF77'] = _shf77
def exists(env):
path, _f77, _shf77, version = get_xlf77(env)
if path and _f77:
xlf77 = os.path.join(path, _f77)
if os.path.exists(xlf77):
return xlf77
return None
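# --- Illustrative usage (not part of the original tool module) ---
# A hypothetical SConstruct fragment that requests this tool explicitly, at
# which point SCons invokes the exists()/generate() hooks defined above:
#
#     env = Environment(tools=['default', 'aixf77'])
#     env.Program('hello', ['hello.f'])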
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "046c40c6a2822832bfb974ec1150b0af",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 75,
"avg_line_length": 33.5125,
"alnum_prop": 0.7157776948899665,
"repo_name": "wfxiang08/Nuitka",
"id": "83c06442cc093a659061c5d15d103fe769b32481",
"size": "2681",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/aixf77.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5518"
},
{
"name": "Batchfile",
"bytes": "1810"
},
{
"name": "C",
"bytes": "36149"
},
{
"name": "C++",
"bytes": "441058"
},
{
"name": "Python",
"bytes": "4431574"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
} |
import requests
def http_get(url):
response = requests.get(url)
response.raise_for_status()
return response.text
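# Illustrative usage sketch (not part of the original module); the URL below
# is a placeholder, not an endpoint used by this project.
if __name__ == "__main__":
    print(len(http_get("https://example.com/")))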
| {
"content_hash": "a20e3d812b5c6c4ce4ae558ee3c12cdb",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 32,
"avg_line_length": 18.142857142857142,
"alnum_prop": 0.7007874015748031,
"repo_name": "teamfruit/defend_against_fruit",
"id": "97747f503fa21dfb6cd0b32032312b1e74f5db92",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypi_redirect/pypi_redirect/server_app/http/_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "287994"
},
{
"name": "Shell",
"bytes": "515"
}
],
"symlink_target": ""
} |
"""RestSession class for creating 'connections' to the Cisco Spark APIs."""
# Use future for Python v2 and v3 compatibility
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from future import standard_library
standard_library.install_aliases()
import logging
import time
import urllib.parse
import warnings
import requests
from ciscosparkapi.exceptions import (
ciscosparkapiException,
SparkApiError,
SparkRateLimitError,
)
from ciscosparkapi.responsecodes import EXPECTED_RESPONSE_CODE
from ciscosparkapi.utils import (
validate_base_url,
check_response_code,
extract_and_parse_json,
)
__author__ = "Chris Lunsford"
__author_email__ = "chrlunsf@cisco.com"
__copyright__ = "Copyright (c) 2016 Cisco Systems, Inc."
__license__ = "MIT"
# Module Constants
DEFAULT_SINGLE_REQUEST_TIMEOUT = 60.0
DEFAULT_WAIT_ON_RATE_LIMIT = True
# Helper Functions
def _fix_next_url(next_url):
"""Remove max=null parameter from URL.
Patch for Cisco Spark Defect: 'next' URL returned in the Link headers of
    the responses contains an errant 'max=null' parameter, which causes the
next request (to this URL) to fail if the URL is requested as-is.
This patch parses the next_url to remove the max=null parameter.
Args:
next_url(basestring): The 'next' URL to be parsed and cleaned.
Returns:
basestring: The clean URL to be used for the 'next' request.
Raises:
AssertionError: If the parameter types are incorrect.
ciscosparkapiException: If 'next_url' does not contain a valid API
endpoint URL (scheme, netloc and path).
"""
next_url = str(next_url)
parsed_url = urllib.parse.urlparse(next_url)
if not parsed_url.scheme or not parsed_url.netloc or not parsed_url.path:
error_message = "'next_url' must be a valid API endpoint URL, " \
"minimally containing a scheme, netloc and path."
raise ciscosparkapiException(error_message)
if parsed_url.query:
query_list = parsed_url.query.split('&')
if 'max=null' in query_list:
query_list.remove('max=null')
warnings.warn("`max=null` still present in next-URL returned "
"from Cisco Spark", Warning)
new_query = '&'.join(query_list)
parsed_url = list(parsed_url)
parsed_url[4] = new_query
return urllib.parse.urlunparse(parsed_url)
# Main module interface
class RestSession(object):
"""RESTful HTTP session class for making calls to the Cisco Spark APIs."""
def __init__(self, access_token, base_url, timeout=None,
single_request_timeout=DEFAULT_SINGLE_REQUEST_TIMEOUT,
wait_on_rate_limit=DEFAULT_WAIT_ON_RATE_LIMIT):
"""Initialize a new RestSession object.
Args:
access_token(basestring): The Spark access token to be used for
this session.
base_url(basestring): The base URL that will be suffixed onto API
endpoint relative URLs to produce a callable absolute URL.
timeout: [Deprecated] The timeout (seconds) for an API request.
single_request_timeout(float): The timeout (seconds) for a single
HTTP REST API request.
wait_on_rate_limit(bool): Enable or disable automatic rate-limit
handling.
"""
assert isinstance(access_token, basestring)
assert isinstance(base_url, basestring)
assert timeout is None or isinstance(timeout, (int, float))
assert (single_request_timeout is None or
isinstance(single_request_timeout, (int, float)))
assert isinstance(wait_on_rate_limit, bool)
super(RestSession, self).__init__()
# Initialize attributes and properties
self._base_url = str(validate_base_url(base_url))
self._access_token = str(access_token)
self._single_request_timeout = single_request_timeout
self._wait_on_rate_limit = wait_on_rate_limit
if timeout:
self.timeout = timeout
# Initialize a new `requests` session
self._req_session = requests.session()
# Update the headers of the `requests` session
self.update_headers({'Authorization': 'Bearer ' + access_token,
'Content-type': 'application/json;charset=utf-8'})
@property
def base_url(self):
"""The base URL for the API endpoints."""
return self._base_url
@property
def access_token(self):
"""The Cisco Spark access token used for this session."""
return self._access_token
@property
def timeout(self):
"""[Deprecated] The timeout (seconds) for an API request.
We are deprecating the timeout property in favor of the more
descriptive single_request_timeout property.
"""
warnings.warn("The 'timeout' property is being deprecated. Please use "
"the 'single_request_timeout' instead.",
DeprecationWarning)
return self._single_request_timeout
@timeout.setter
def timeout(self, value):
"""[Deprecated] The timeout (seconds) for an API request.
We are deprecating the timeout property in favor of the more
descriptive single_request_timeout property.
"""
warnings.warn("The 'timeout' property is being deprecated. Please use "
"the 'single_request_timeout' instead.",
DeprecationWarning)
assert value is None or value > 0
        self._single_request_timeout = float(value) if value is not None else None
@property
def single_request_timeout(self):
"""The timeout (seconds) for a single HTTP REST API request."""
return self._single_request_timeout
@single_request_timeout.setter
def single_request_timeout(self, value):
"""The timeout (seconds) for a single HTTP REST API request."""
assert (value is None or
(isinstance(value, (int, float)) and value > 0.0))
        self._single_request_timeout = float(value) if value is not None else None
@property
def wait_on_rate_limit(self):
"""Automatic rate-limit handling.
This setting enables or disables automatic rate-limit handling. When
enabled, rate-limited requests will be automatically be retried after
waiting `Retry-After` seconds (provided by Cisco Spark in the
rate-limit response header).
"""
return self._wait_on_rate_limit
@wait_on_rate_limit.setter
def wait_on_rate_limit(self, value):
"""Enable or disable automatic rate-limit handling."""
assert isinstance(value, bool)
self._wait_on_rate_limit = value
@property
def headers(self):
"""The HTTP headers used for requests in this session."""
return self._req_session.headers.copy()
def update_headers(self, headers):
"""Update the HTTP headers used for requests in this session.
Note: Updates provided by the dictionary passed as the `headers`
parameter to this method are merged into the session headers by adding
new key-value pairs and/or updating the values of existing keys. The
session headers are not replaced by the provided dictionary.
Args:
headers(dict): Updates to the current session headers.
"""
assert isinstance(headers, dict)
self._req_session.headers.update(headers)
def abs_url(self, url):
"""Given a relative or absolute URL; return an absolute URL.
Args:
url(basestring): A relative or absolute URL.
Returns:
str: An absolute URL.
"""
parsed_url = urllib.parse.urlparse(url)
if not parsed_url.scheme and not parsed_url.netloc:
# url is a relative URL; combine with base_url
return urllib.parse.urljoin(str(self.base_url), str(url))
else:
# url is already an absolute URL; return as is
return url
def request(self, method, url, erc, **kwargs):
"""Abstract base method for making requests to the Cisco Spark APIs.
This base method:
* Expands the API endpoint URL to an absolute URL
* Makes the actual HTTP request to the API endpoint
* Provides support for Spark rate-limiting
* Inspects response codes and raises exceptions as appropriate
Args:
method(basestring): The request-method type ('GET', 'POST', etc.).
url(basestring): The URL of the API endpoint to be called.
erc(int): The expected response code that should be returned by the
Cisco Spark API endpoint to indicate success.
**kwargs: Passed on to the requests package.
Raises:
SparkApiError: If anything other than the expected response code is
returned by the Cisco Spark API endpoint.
"""
logger = logging.getLogger(__name__)
# Ensure the url is an absolute URL
abs_url = self.abs_url(url)
# Update request kwargs with session defaults
kwargs.setdefault('timeout', self.single_request_timeout)
while True:
# Make the HTTP request to the API endpoint
response = self._req_session.request(method, abs_url, **kwargs)
try:
# Check the response code for error conditions
check_response_code(response, erc)
except SparkRateLimitError as e:
# Catch rate-limit errors
# Wait and retry if automatic rate-limit handling is enabled
if self.wait_on_rate_limit and e.retry_after:
logger.info("Received rate-limit message; "
"waiting {:0.0f} seconds."
"".format(e.retry_after))
time.sleep(e.retry_after)
continue
else:
# Re-raise the SparkRateLimitError
raise
else:
return response
def get(self, url, params=None, **kwargs):
"""Sends a GET request.
Args:
url(basestring): The URL of the API endpoint.
params(dict): The parameters for the HTTP GET request.
**kwargs:
erc(int): The expected (success) response code for the request.
others: Passed on to the requests package.
Raises:
SparkApiError: If anything other than the expected response code is
returned by the Cisco Spark API endpoint.
"""
assert isinstance(url, basestring)
assert params is None or isinstance(params, dict)
# Expected response code
erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['GET'])
response = self.request('GET', url, erc, params=params, **kwargs)
return extract_and_parse_json(response)
def get_pages(self, url, params=None, **kwargs):
"""Return a generator that GETs and yields pages of data.
Provides native support for RFC5988 Web Linking.
Args:
url(basestring): The URL of the API endpoint.
params(dict): The parameters for the HTTP GET request.
**kwargs:
erc(int): The expected (success) response code for the request.
others: Passed on to the requests package.
Raises:
SparkApiError: If anything other than the expected response code is
returned by the Cisco Spark API endpoint.
"""
assert isinstance(url, basestring)
assert params is None or isinstance(params, dict)
# Expected response code
erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['GET'])
# First request
response = self.request('GET', url, erc, params=params, **kwargs)
while True:
yield extract_and_parse_json(response)
if response.links.get('next'):
next_url = response.links.get('next').get('url')
# Patch for Cisco Spark 'max=null' in next URL bug.
# Testing shows that patch is no longer needed; raising a
                # warning if it is still taking effect;
# considering for future removal
next_url = _fix_next_url(next_url)
# Subsequent requests
response = self.request('GET', next_url, erc, **kwargs)
else:
break
def get_items(self, url, params=None, **kwargs):
"""Return a generator that GETs and yields individual JSON `items`.
Yields individual `items` from Cisco Spark's top-level {'items': [...]}
JSON objects. Provides native support for RFC5988 Web Linking. The
generator will request additional pages as needed until all items have
been returned.
Args:
url(basestring): The URL of the API endpoint.
params(dict): The parameters for the HTTP GET request.
**kwargs:
erc(int): The expected (success) response code for the request.
others: Passed on to the requests package.
Raises:
SparkApiError: If anything other than the expected response code is
returned by the Cisco Spark API endpoint.
ciscosparkapiException: If the returned response does not contain a
top-level dictionary with an 'items' key.
"""
# Get generator for pages of JSON data
pages = self.get_pages(url, params=params, **kwargs)
for json_page in pages:
assert isinstance(json_page, dict)
items = json_page.get('items')
if items is None:
error_message = "'items' key not found in JSON data: " \
"{!r}".format(json_page)
raise ciscosparkapiException(error_message)
else:
for item in items:
yield item
def post(self, url, json=None, data=None, **kwargs):
"""Sends a POST request.
Args:
url(basestring): The URL of the API endpoint.
            json: Data to be sent in JSON format in the body of the request.
data: Data to be sent in the body of the request.
**kwargs:
erc(int): The expected (success) response code for the request.
others: Passed on to the requests package.
Raises:
SparkApiError: If anything other than the expected response code is
returned by the Cisco Spark API endpoint.
"""
assert isinstance(url, basestring)
# Expected response code
erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['POST'])
response = self.request('POST', url, erc, json=json, data=data,
**kwargs)
return extract_and_parse_json(response)
def put(self, url, json=None, data=None, **kwargs):
"""Sends a PUT request.
Args:
url(basestring): The URL of the API endpoint.
            json: Data to be sent in JSON format in the body of the request.
data: Data to be sent in the body of the request.
**kwargs:
erc(int): The expected (success) response code for the request.
others: Passed on to the requests package.
Raises:
SparkApiError: If anything other than the expected response code is
returned by the Cisco Spark API endpoint.
"""
assert isinstance(url, basestring)
# Expected response code
erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['PUT'])
response = self.request('PUT', url, erc, json=json, data=data,
**kwargs)
return extract_and_parse_json(response)
def delete(self, url, **kwargs):
"""Sends a DELETE request.
Args:
url(basestring): The URL of the API endpoint.
**kwargs:
erc(int): The expected (success) response code for the request.
others: Passed on to the requests package.
Raises:
SparkApiError: If anything other than the expected response code is
returned by the Cisco Spark API endpoint.
"""
assert isinstance(url, basestring)
# Expected response code
erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['DELETE'])
self.request('DELETE', url, erc, **kwargs)
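# Illustrative usage sketch: get_items() drives get_pages(), which follows
# RFC5988 "next" links until the API stops returning another page. The
# `session` argument is assumed to be an instance of the session class above;
# the 'rooms' endpoint and the 'max' parameter are illustration values only.
def _example_paginate(session):
    for item in session.get_items('rooms', params={'max': 100}):
        print(item.get('id'), item.get('title'))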
| {
"content_hash": "bb9ac1eb31c504a820fbada42ee0374c",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 79,
"avg_line_length": 35.70700636942675,
"alnum_prop": 0.6060173623498633,
"repo_name": "jbogarin/ciscosparkapi",
"id": "9552ece21e9c01b9fd2f420df933a42d2c64c874",
"size": "16842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ciscosparkapi/restsession.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1542"
},
{
"name": "Python",
"bytes": "264814"
}
],
"symlink_target": ""
} |
import numpy as np
def compute_fsd_features(im_label, K=128, Fs=6, Delta=8, rprops=None):
"""
    Calculates `Fourier shape descriptors` for each object.
Parameters
----------
im_label : array_like
A labeled mask image wherein intensity of a pixel is the ID of the
object it belongs to. Non-zero values are considered to be foreground
objects.
K : int, optional
Number of points for boundary resampling to calculate fourier
descriptors. Default value = 128.
Fs : int, optional
Number of frequency bins for calculating FSDs. Default value = 6.
Delta : int, optional
Used to dilate nuclei and define cytoplasm region. Default value = 8.
rprops : output of skimage.measure.regionprops, optional
rprops = skimage.measure.regionprops( im_label ). If rprops is not
passed then it will be computed inside which will increase the
computation time.
Returns
-------
fdata: Pandas data frame containing the FSD features for each
object/label.
References
----------
.. [#] D. Zhang et al. "A comparative study on shape retrieval using
Fourier descriptors with different shape signatures," In Proc.
ICIMADE01, 2001.
"""
import pandas as pd
from skimage.measure import regionprops
from skimage.segmentation import find_boundaries
# List of feature names
feature_list = []
for i in range(0, Fs):
feature_list = np.append(feature_list, 'Shape.FSD' + str(i+1))
# get Label size x
sizex = im_label.shape[0]
sizey = im_label.shape[1]
# get the number of objects in Label
if rprops is None:
rprops = regionprops(im_label)
# create pandas data frame containing the features for each object
numFeatures = len(feature_list)
numLabels = len(rprops)
fdata = pd.DataFrame(np.zeros((numLabels, numFeatures)),
columns=feature_list)
# fourier descriptors, spaced evenly over the interval 1:K/2
Interval = np.round(
np.power(
2, np.linspace(0, np.log2(K)-1, Fs+1, endpoint=True)
)
).astype(np.uint8)
for i in range(numLabels):
# get bounds of dilated nucleus
min_row, max_row, min_col, max_col = \
_GetBounds(rprops[i].bbox, Delta, sizex, sizey)
# grab label mask
lmask = (
im_label[min_row:max_row, min_col:max_col] == rprops[i].label
).astype(np.bool)
# find boundaries
Bounds = np.argwhere(
find_boundaries(lmask, mode="inner").astype(np.uint8) == 1
)
# check length of boundaries
if len(Bounds) < 2:
fdata.iloc[i, :] = 0
else:
# compute fourier descriptors
fdata.iloc[i, :] = _FSDs(Bounds[:, 0], Bounds[:, 1], K, Interval)
return fdata
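# Illustrative usage sketch: compute_fsd_features() applied to a toy labeled
# mask containing a single square object, using the default parameters
# documented above. The resulting frame has one row per label and columns
# Shape.FSD1 ... Shape.FSD6.
def _example_fsd():
    im_label = np.zeros((64, 64), dtype=np.int32)
    im_label[16:48, 16:48] = 1            # one square "nucleus", label 1
    fdata = compute_fsd_features(im_label, K=128, Fs=6, Delta=8)
    return fdata                          # pandas DataFrame of shape (1, 6)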
def _InterpolateArcLength(X, Y, K):
"""
Resamples boundary points [X, Y] at L total equal arc-length locations.
Parameters
----------
X : array_like
x points of boundaries
Y : array_like
y points of boundaries
K : int
Number of points for boundary resampling to calculate fourier
descriptors. Default value = 128.
Returns
-------
iX : array_like
L-length vector of horizontal interpolated coordinates with equal
arc-length spacing.
iY : array_like
L-length vector of vertical interpolated coordinates with equal
arc-length spacing.
"""
# generate spaced points 0, 1/k, 1
interval = np.linspace(0, 1, K+1)
# get segment lengths
slens = np.sqrt(np.diff(X)**2 + np.diff(Y)**2)
# normalize to unit length
slens = np.true_divide(slens, slens.sum())
# calculate cumulative length along boundary
cumulative = np.zeros(len(slens)+1)
cumulative[1:] = np.cumsum(slens)
# place points in 'Interval' along boundary
locations = np.digitize(interval, cumulative)
# clip to ends
locations[locations > len(slens)] = len(slens)
# linear interpolation
Lie = (interval - cumulative[locations-1])/slens[locations-1]
iX = X[locations-1] + (X[locations]-X[locations-1])*Lie
iY = Y[locations-1] + (Y[locations]-Y[locations-1])*Lie
return iX, iY
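# Illustrative sketch: resampling a simple closed boundary with
# _InterpolateArcLength(). A square outline given by five corner points is
# interpolated to K + 1 points spaced evenly by arc length (the coordinates
# are arbitrary illustration values).
def _example_arclength():
    X = np.array([0., 0., 10., 10., 0.])
    Y = np.array([0., 10., 10., 0., 0.])
    iX, iY = _InterpolateArcLength(X, Y, K=16)
    return iX, iY                          # 17 evenly spaced boundary points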
def _FSDs(X, Y, K, Intervals):
"""
    Calculates FSDs from boundary points X, Y. Boundaries are resampled to have
K equally spaced points (arclength) around the shape. The curvature is
calculated using the cumulative angular function, measuring the
displacement of the tangent angle from the starting point of the boundary.
The K-length fft of the cumulative angular function is calculated, and
then the elements of 'F' are summed as the spectral energy over
'Intervals'.
Parameters
----------
X : array_like
x points of boundaries
Y : array_like
y points of boundaries
K : int
Number of points for boundary resampling to calculate fourier
descriptors. Default value = 128.
Intervals : array_like
Intervals spaced evenly over 1:K/2.
Returns
-------
F : array_like
length(Intervals) vector containing spectral energy of
cumulative angular function, summed over defined 'Intervals'.
"""
# check input 'Intervals'
if Intervals[0] != 1.:
Intervals = np.hstack((1., Intervals))
if Intervals[-1] != (K / 2):
Intervals = np.hstack((Intervals, float(K)))
# get length of intervals
L = len(Intervals)
# initialize F
F = np.zeros((L-1, )).astype(float)
# interpolate boundaries
iX, iY = _InterpolateArcLength(X, Y, K)
# check if iXY.iX is not empty
if iX.size:
# calculate curvature
Curvature = np.arctan2(
(iY[1:] - iY[:-1]),
(iX[1:] - iX[:-1])
)
# make curvature cumulative
Curvature = Curvature - Curvature[0]
# calculate FFT
fX = np.fft.fft(Curvature).T
# spectral energy
fX = fX * fX.conj()
fX = fX / fX.sum()
# calculate 'F' values
for i in range(L-1):
F[i] = np.round(
fX[Intervals[i]-1:Intervals[i+1]].sum(), L
).real.astype(float)
return F
def _GetBounds(bbox, delta, M, N):
"""
Returns bounds of object in global label image.
Parameters
----------
bbox : tuple
Bounding box (min_row, min_col, max_row, max_col).
delta : int
Used to dilate nuclei and define cytoplasm region.
Default value = 8.
M : int
X size of label image.
N : int
Y size of label image.
Returns
-------
min_row : int
        Minimum row of the region bounds.
max_row : int
Maximum row of the region bounds.
min_col : int
        Minimum column of the region bounds.
max_col : int
Maximum column of the region bounds.
"""
min_row, min_col, max_row, max_col = bbox
min_row_out = max(0, (min_row - delta))
max_row_out = min(M-1, (max_row + delta))
min_col_out = max(0, (min_col - delta))
max_col_out = min(N-1, (max_col + delta))
return min_row_out, max_row_out, min_col_out, max_col_out
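# Illustrative sketch: _GetBounds() dilates a bounding box by `delta` and clips
# it to the label-image extents; with a 100 x 100 image the box below is
# clipped at the top-left corner (values are illustration data only).
def _example_bounds():
    bbox = (2, 5, 30, 40)                  # (min_row, min_col, max_row, max_col)
    return _GetBounds(bbox, delta=8, M=100, N=100)   # -> (0, 38, 0, 48)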
| {
"content_hash": "ffee112d7aebc84caab940a74b484057",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 78,
"avg_line_length": 30.654008438818565,
"alnum_prop": 0.6068823124569855,
"repo_name": "DigitalSlideArchive/HistomicsTK",
"id": "0d0cc2aa8e62715d15f3b1bbc135e54e10262190",
"size": "7265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "histomicstk/features/compute_fsd_features.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "1669"
},
{
"name": "Cython",
"bytes": "19226"
},
{
"name": "Dockerfile",
"bytes": "3235"
},
{
"name": "Python",
"bytes": "772710"
},
{
"name": "Shell",
"bytes": "965"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import numpy as np
from .. import backend as K
from .. import activations, initializations, regularizers
from ..engine import Layer, InputSpec
def time_distributed_dense(x, w, b=None, dropout=None,
input_dim=None, output_dim=None, timesteps=None):
'''Apply y.w + b for every temporal slice y of x.
'''
if not input_dim:
input_dim = K.shape(x)[2]
if not timesteps:
timesteps = K.shape(x)[1]
if not output_dim:
output_dim = K.shape(w)[1]
if dropout is not None and 0. < dropout < 1.:
# apply the same dropout pattern at every timestep
ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
dropout_matrix = K.dropout(ones, dropout)
expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
x = K.in_train_phase(x * expanded_dropout_matrix, x)
# collapse time dimension and batch dimension together
x = K.reshape(x, (-1, input_dim))
x = K.dot(x, w)
if b:
x = x + b
# reshape to 3D tensor
x = K.reshape(x, K.pack([-1, timesteps, output_dim]))
if K.backend() == 'tensorflow':
x.set_shape([None, None, output_dim])
return x
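# Illustrative sketch: the computation performed by time_distributed_dense(),
# written out with plain numpy for a concrete shape. The same affine map is
# applied to every timestep by collapsing the batch and time axes, applying
# the dense map, and restoring the time axis (all values are illustration data).
def _example_time_distributed_dense():
    nb_samples, timesteps, input_dim, output_dim = 2, 5, 3, 4
    x = np.random.rand(nb_samples, timesteps, input_dim)
    w = np.random.rand(input_dim, output_dim)
    b = np.random.rand(output_dim)
    y = np.dot(x.reshape(-1, input_dim), w) + b
    return y.reshape(nb_samples, timesteps, output_dim)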
class Recurrent(Layer):
'''Abstract base class for recurrent layers.
Do not use in a model -- it's not a valid layer!
    Use its child classes `LSTM`, `GRU` and `SimpleRNN` instead.
All recurrent layers (`LSTM`, `GRU`, `SimpleRNN`) also
follow the specifications of this class and accept
the keyword arguments listed below.
# Example
```python
# as the first layer in a Sequential model
model = Sequential()
model.add(LSTM(32, input_shape=(10, 64)))
# now model.output_shape == (None, 32)
# note: `None` is the batch dimension.
# the following is identical:
model = Sequential()
model.add(LSTM(32, input_dim=64, input_length=10))
    # for subsequent layers, no need to specify the input size:
model.add(LSTM(16))
```
# Arguments
weights: list of Numpy arrays to set as initial weights.
The list should have 3 elements, of shapes:
`[(input_dim, output_dim), (output_dim, output_dim), (output_dim,)]`.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False). If True, the network will be unrolled,
else a symbolic loop will be used. When using TensorFlow, the network
is always unrolled, so this argument does not do anything.
            Unrolling can speed up an RNN, although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
consume_less: one of "cpu", "mem", or "gpu" (LSTM/GRU only).
If set to "cpu", the RNN will use
an implementation that uses fewer, larger matrix products,
thus running faster on CPU but consuming more memory.
If set to "mem", the RNN will use more matrix products,
but smaller ones, thus running slower (may actually be faster on GPU)
while consuming less memory.
If set to "gpu" (LSTM/GRU only), the RNN will combine the input gate,
the forget gate and the output gate into a single matrix,
enabling more time-efficient parallelization on the GPU. Note: RNN
dropout must be shared for all gates, resulting in a slightly
reduced regularization.
input_dim: dimensionality of the input (integer).
This argument (or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
# Input shape
3D tensor with shape `(nb_samples, timesteps, input_dim)`.
# Output shape
- if `return_sequences`: 3D tensor with shape
`(nb_samples, timesteps, output_dim)`.
- else, 2D tensor with shape `(nb_samples, output_dim)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on performance
You will see much better performance with RNNs in Theano compared to
TensorFlow. Additionally, when using TensorFlow, it is preferable
to set `unroll=True` for better performance.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch.
This assumes a one-to-one mapping between
samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
a `batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
a `batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs *including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
'''
def __init__(self, weights=None,
return_sequences=False, go_backwards=False, stateful=False,
unroll=False, consume_less='cpu',
input_dim=None, input_length=None, **kwargs):
self.return_sequences = return_sequences
self.initial_weights = weights
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.consume_less = consume_less
self.supports_masking = True
self.input_spec = [InputSpec(ndim=3)]
self.input_dim = input_dim
self.input_length = input_length
if self.input_dim:
kwargs['input_shape'] = (self.input_length, self.input_dim)
super(Recurrent, self).__init__(**kwargs)
def get_output_shape_for(self, input_shape):
if self.return_sequences:
return (input_shape[0], input_shape[1], self.output_dim)
else:
return (input_shape[0], self.output_dim)
def compute_mask(self, input, mask):
if self.return_sequences:
return mask
else:
return None
def step(self, x, states):
raise NotImplementedError
def get_constants(self, x):
return []
def get_initial_states(self, x):
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(x) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
initial_state = K.tile(initial_state, [1, self.output_dim]) # (samples, output_dim)
initial_states = [initial_state for _ in range(len(self.states))]
return initial_states
def preprocess_input(self, x):
return x
def call(self, x, mask=None):
# input shape: (nb_samples, time (padded with zeros), input_dim)
# note that the .build() method of subclasses MUST define
# self.input_spec with a complete input shape.
input_shape = self.input_spec[0].shape
if self.stateful:
initial_states = self.states
else:
initial_states = self.get_initial_states(x)
constants = self.get_constants(x)
preprocessed_input = self.preprocess_input(x)
last_output, outputs, states = K.rnn(self.step, preprocessed_input,
initial_states,
go_backwards=self.go_backwards,
mask=mask,
constants=constants,
unroll=self.unroll,
input_length=input_shape[1])
if self.stateful:
self.updates = []
for i in range(len(states)):
self.updates.append((self.states[i], states[i]))
if self.return_sequences:
return outputs
else:
return last_output
def get_config(self):
config = {'return_sequences': self.return_sequences,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll,
'consume_less': self.consume_less}
if self.stateful:
config['batch_input_shape'] = self.input_spec[0].shape
else:
config['input_dim'] = self.input_dim
config['input_length'] = self.input_length
base_config = super(Recurrent, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
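# Illustrative sketch of the statefulness notes in the Recurrent docstring:
# a stateful layer requires a fixed batch size via `batch_input_shape`, and
# carried-over states are cleared with reset_states(). The model below is a
# hypothetical example; layer sizes and training details are assumptions.
def _example_stateful_rnn():
    from keras.models import Sequential
    from keras.layers import Dense
    model = Sequential()
    model.add(LSTM(32, batch_input_shape=(16, 10, 64), stateful=True))
    model.add(Dense(1))
    model.compile(optimizer='rmsprop', loss='mse')
    # ... train on successive batches of 16 aligned sequences ...
    model.reset_states()                   # clear states between sequences
    return model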
class SimpleRNN(Recurrent):
'''Fully-connected RNN where the output is to be fed back to input.
# Arguments
output_dim: dimension of the internal projections and the final output.
init: weight initialization function.
Can be the name of an existing function (str),
or a Theano function (see: [initializations](../initializations.md)).
inner_init: initialization function of the inner cells.
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
U_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the recurrent weights matrices.
b_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
dropout_W: float between 0 and 1. Fraction of the input units to drop for input gates.
dropout_U: float between 0 and 1. Fraction of the input units to drop for recurrent connections.
# References
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
'''
def __init__(self, output_dim,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(SimpleRNN, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
if self.stateful:
self.reset_states()
else:
# initial states: all-zero tensor of shape (output_dim)
self.states = [None]
input_dim = input_shape[2]
self.input_dim = input_dim
self.W = self.init((input_dim, self.output_dim),
name='{}_W'.format(self.name))
self.U = self.inner_init((self.output_dim, self.output_dim),
name='{}_U'.format(self.name))
self.b = K.zeros((self.output_dim,), name='{}_b'.format(self.name))
self.regularizers = []
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
if self.U_regularizer:
self.U_regularizer.set_param(self.U)
self.regularizers.append(self.U_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
self.trainable_weights = [self.W, self.U, self.b]
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
raise Exception('If a RNN is stateful, a complete ' +
'input_shape must be provided (including batch size).')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[0], self.output_dim))]
def preprocess_input(self, x):
if self.consume_less == 'cpu':
input_shape = self.input_spec[0].shape
input_dim = input_shape[2]
timesteps = input_shape[1]
return time_distributed_dense(x, self.W, self.b, self.dropout_W,
input_dim, self.output_dim,
timesteps)
else:
return x
def step(self, x, states):
prev_output = states[0]
B_U = states[1]
B_W = states[2]
if self.consume_less == 'cpu':
h = x
else:
h = K.dot(x * B_W, self.W) + self.b
output = self.activation(h + K.dot(prev_output * B_U, self.U))
return output, [output]
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.output_dim))
B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
constants.append(B_U)
else:
constants.append(K.cast_to_floatx(1.))
if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, input_dim))
B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
constants.append(B_W)
else:
constants.append(K.cast_to_floatx(1.))
return constants
def get_config(self):
config = {'output_dim': self.output_dim,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'activation': self.activation.__name__,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'dropout_W': self.dropout_W,
'dropout_U': self.dropout_U}
base_config = super(SimpleRNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
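# Illustrative sketch: the recurrence computed by SimpleRNN.step(), written out
# with plain numpy for a single sequence and no dropout. At each timestep
# output = tanh(x_t . W + b + h_prev . U), and the output is fed back as the
# next state.
def _example_simple_rnn_numpy(x, W, U, b):
    # x: (timesteps, input_dim); W: (input_dim, output_dim);
    # U: (output_dim, output_dim); b: (output_dim,)
    h = np.zeros(W.shape[1])
    outputs = []
    for t in range(x.shape[0]):
        h = np.tanh(np.dot(x[t], W) + b + np.dot(h, U))
        outputs.append(h)
    return np.array(outputs)               # (timesteps, output_dim)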
class GRU(Recurrent):
'''Gated Recurrent Unit - Cho et al. 2014.
# Arguments
output_dim: dimension of the internal projections and the final output.
init: weight initialization function.
Can be the name of an existing function (str),
or a Theano function (see: [initializations](../initializations.md)).
inner_init: initialization function of the inner cells.
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
inner_activation: activation function for the inner cells.
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
U_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the recurrent weights matrices.
b_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
dropout_W: float between 0 and 1. Fraction of the input units to drop for input gates.
dropout_U: float between 0 and 1. Fraction of the input units to drop for recurrent connections.
# References
- [On the Properties of Neural Machine Translation: Encoder–Decoder Approaches](http://www.aclweb.org/anthology/W14-4012)
- [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/pdf/1412.3555v1.pdf)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
'''
def __init__(self, output_dim,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(GRU, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
self.input_dim = input_shape[2]
if self.stateful:
self.reset_states()
else:
# initial states: all-zero tensor of shape (output_dim)
self.states = [None]
if self.consume_less == 'gpu':
self.W = self.init((self.input_dim, 3 * self.output_dim),
name='{}_W'.format(self.name))
self.U = self.inner_init((self.output_dim, 3 * self.output_dim),
name='{}_U'.format(self.name))
self.b = K.variable(np.hstack((np.zeros(self.output_dim),
np.zeros(self.output_dim),
np.zeros(self.output_dim))),
name='{}_b'.format(self.name))
self.trainable_weights = [self.W, self.U, self.b]
else:
self.W_z = self.init((self.input_dim, self.output_dim),
name='{}_W_z'.format(self.name))
self.U_z = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_z'.format(self.name))
self.b_z = K.zeros((self.output_dim,), name='{}_b_z'.format(self.name))
self.W_r = self.init((self.input_dim, self.output_dim),
name='{}_W_r'.format(self.name))
self.U_r = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_r'.format(self.name))
self.b_r = K.zeros((self.output_dim,), name='{}_b_r'.format(self.name))
self.W_h = self.init((self.input_dim, self.output_dim),
name='{}_W_h'.format(self.name))
self.U_h = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_h'.format(self.name))
self.b_h = K.zeros((self.output_dim,), name='{}_b_h'.format(self.name))
self.trainable_weights = [self.W_z, self.U_z, self.b_z,
self.W_r, self.U_r, self.b_r,
self.W_h, self.U_h, self.b_h]
self.W = K.concatenate([self.W_z, self.W_r, self.W_h])
self.U = K.concatenate([self.U_z, self.U_r, self.U_h])
self.b = K.concatenate([self.b_z, self.b_r, self.b_h])
self.regularizers = []
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
if self.U_regularizer:
self.U_regularizer.set_param(self.U)
self.regularizers.append(self.U_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
raise Exception('If a RNN is stateful, a complete ' +
'input_shape must be provided (including batch size).')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[0], self.output_dim))]
def preprocess_input(self, x):
if self.consume_less == 'cpu':
input_shape = self.input_spec[0].shape
input_dim = input_shape[2]
timesteps = input_shape[1]
x_z = time_distributed_dense(x, self.W_z, self.b_z, self.dropout_W,
input_dim, self.output_dim, timesteps)
x_r = time_distributed_dense(x, self.W_r, self.b_r, self.dropout_W,
input_dim, self.output_dim, timesteps)
x_h = time_distributed_dense(x, self.W_h, self.b_h, self.dropout_W,
input_dim, self.output_dim, timesteps)
return K.concatenate([x_z, x_r, x_h], axis=2)
else:
return x
def step(self, x, states):
h_tm1 = states[0] # previous memory
B_U = states[1] # dropout matrices for recurrent units
B_W = states[2]
if self.consume_less == 'gpu':
matrix_x = K.dot(x * B_W[0], self.W) + self.b
matrix_inner = K.dot(h_tm1 * B_U[0], self.U[:, :2 * self.output_dim])
x_z = matrix_x[:, :self.output_dim]
x_r = matrix_x[:, self.output_dim: 2 * self.output_dim]
inner_z = matrix_inner[:, :self.output_dim]
inner_r = matrix_inner[:, self.output_dim: 2 * self.output_dim]
z = self.inner_activation(x_z + inner_z)
r = self.inner_activation(x_r + inner_r)
x_h = matrix_x[:, 2 * self.output_dim:]
inner_h = K.dot(r * h_tm1 * B_U[0], self.U[:, 2 * self.output_dim:])
hh = self.activation(x_h + inner_h)
else:
if self.consume_less == 'cpu':
x_z = x[:, :self.output_dim]
x_r = x[:, self.output_dim: 2 * self.output_dim]
x_h = x[:, 2 * self.output_dim:]
elif self.consume_less == 'mem':
x_z = K.dot(x * B_W[0], self.W_z) + self.b_z
x_r = K.dot(x * B_W[1], self.W_r) + self.b_r
x_h = K.dot(x * B_W[2], self.W_h) + self.b_h
else:
raise Exception('Unknown `consume_less` mode.')
z = self.inner_activation(x_z + K.dot(h_tm1 * B_U[0], self.U_z))
r = self.inner_activation(x_r + K.dot(h_tm1 * B_U[1], self.U_r))
hh = self.activation(x_h + K.dot(r * h_tm1 * B_U[2], self.U_h))
h = z * h_tm1 + (1 - z) * hh
return h, [h]
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.output_dim))
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
if 0 < self.dropout_W < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, input_dim))
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
constants.append(B_W)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
return constants
def get_config(self):
config = {'output_dim': self.output_dim,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'activation': self.activation.__name__,
'inner_activation': self.inner_activation.__name__,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'dropout_W': self.dropout_W,
'dropout_U': self.dropout_U}
base_config = super(GRU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
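# Illustrative sketch: the GRU update computed in GRU.step(), written out with
# plain numpy for a single timestep and no dropout. A plain sigmoid stands in
# for the default hard_sigmoid inner activation; weights and inputs are assumed
# to be numpy arrays of compatible shapes.
def _example_gru_step_numpy(x_t, h_tm1,
                            W_z, U_z, b_z, W_r, U_r, b_r, W_h, U_h, b_h):
    sigmoid = lambda a: 1. / (1. + np.exp(-a))
    z = sigmoid(np.dot(x_t, W_z) + b_z + np.dot(h_tm1, U_z))   # update gate
    r = sigmoid(np.dot(x_t, W_r) + b_r + np.dot(h_tm1, U_r))   # reset gate
    hh = np.tanh(np.dot(x_t, W_h) + b_h + np.dot(r * h_tm1, U_h))
    return z * h_tm1 + (1 - z) * hh        # new hidden state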
class LSTM(Recurrent):
'''Long-Short Term Memory unit - Hochreiter 1997.
For a step-by-step description of the algorithm, see
[this tutorial](http://deeplearning.net/tutorial/lstm.html).
# Arguments
output_dim: dimension of the internal projections and the final output.
init: weight initialization function.
Can be the name of an existing function (str),
or a Theano function (see: [initializations](../initializations.md)).
inner_init: initialization function of the inner cells.
forget_bias_init: initialization function for the bias of the forget gate.
[Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
recommend initializing with ones.
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
inner_activation: activation function for the inner cells.
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
U_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the recurrent weights matrices.
b_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
dropout_W: float between 0 and 1. Fraction of the input units to drop for input gates.
dropout_U: float between 0 and 1. Fraction of the input units to drop for recurrent connections.
# References
- [Long short-term memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf) (original 1997 paper)
- [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)
- [Supervised sequence labelling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
'''
def __init__(self, output_dim,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh',
inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(LSTM, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
self.input_dim = input_shape[2]
if self.stateful:
self.reset_states()
else:
# initial states: 2 all-zero tensors of shape (output_dim)
self.states = [None, None]
if self.consume_less == 'gpu':
self.W = self.init((self.input_dim, 4 * self.output_dim),
name='{}_W'.format(self.name))
self.U = self.inner_init((self.output_dim, 4 * self.output_dim),
name='{}_U'.format(self.name))
self.b = K.variable(np.hstack((np.zeros(self.output_dim),
K.get_value(self.forget_bias_init((self.output_dim,))),
np.zeros(self.output_dim),
np.zeros(self.output_dim))),
name='{}_b'.format(self.name))
self.trainable_weights = [self.W, self.U, self.b]
else:
self.W_i = self.init((self.input_dim, self.output_dim),
name='{}_W_i'.format(self.name))
self.U_i = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_i'.format(self.name))
self.b_i = K.zeros((self.output_dim,), name='{}_b_i'.format(self.name))
self.W_f = self.init((self.input_dim, self.output_dim),
name='{}_W_f'.format(self.name))
self.U_f = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_f'.format(self.name))
self.b_f = self.forget_bias_init((self.output_dim,),
name='{}_b_f'.format(self.name))
self.W_c = self.init((self.input_dim, self.output_dim),
name='{}_W_c'.format(self.name))
self.U_c = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_c'.format(self.name))
self.b_c = K.zeros((self.output_dim,), name='{}_b_c'.format(self.name))
self.W_o = self.init((self.input_dim, self.output_dim),
name='{}_W_o'.format(self.name))
self.U_o = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_o'.format(self.name))
self.b_o = K.zeros((self.output_dim,), name='{}_b_o'.format(self.name))
self.trainable_weights = [self.W_i, self.U_i, self.b_i,
self.W_c, self.U_c, self.b_c,
self.W_f, self.U_f, self.b_f,
self.W_o, self.U_o, self.b_o]
self.W = K.concatenate([self.W_i, self.W_f, self.W_c, self.W_o])
self.U = K.concatenate([self.U_i, self.U_f, self.U_c, self.U_o])
self.b = K.concatenate([self.b_i, self.b_f, self.b_c, self.b_o])
self.regularizers = []
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
if self.U_regularizer:
self.U_regularizer.set_param(self.U)
self.regularizers.append(self.U_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
raise Exception('If a RNN is stateful, a complete ' +
'input_shape must be provided (including batch size).')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0], self.output_dim)))
K.set_value(self.states[1],
np.zeros((input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[0], self.output_dim)),
K.zeros((input_shape[0], self.output_dim))]
def preprocess_input(self, x):
if self.consume_less == 'cpu':
if 0 < self.dropout_W < 1:
dropout = self.dropout_W
else:
dropout = 0
input_shape = self.input_spec[0].shape
input_dim = input_shape[2]
timesteps = input_shape[1]
x_i = time_distributed_dense(x, self.W_i, self.b_i, dropout,
input_dim, self.output_dim, timesteps)
x_f = time_distributed_dense(x, self.W_f, self.b_f, dropout,
input_dim, self.output_dim, timesteps)
x_c = time_distributed_dense(x, self.W_c, self.b_c, dropout,
input_dim, self.output_dim, timesteps)
x_o = time_distributed_dense(x, self.W_o, self.b_o, dropout,
input_dim, self.output_dim, timesteps)
return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
else:
return x
def step(self, x, states):
h_tm1 = states[0]
c_tm1 = states[1]
B_U = states[2]
B_W = states[3]
if self.consume_less == 'gpu':
z = K.dot(x * B_W[0], self.W) + K.dot(h_tm1 * B_U[0], self.U) + self.b
z0 = z[:, :self.output_dim]
z1 = z[:, self.output_dim: 2 * self.output_dim]
z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
z3 = z[:, 3 * self.output_dim:]
i = self.inner_activation(z0)
f = self.inner_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.inner_activation(z3)
else:
if self.consume_less == 'cpu':
x_i = x[:, :self.output_dim]
x_f = x[:, self.output_dim: 2 * self.output_dim]
x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]
x_o = x[:, 3 * self.output_dim:]
elif self.consume_less == 'mem':
x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
x_f = K.dot(x * B_W[1], self.W_f) + self.b_f
x_c = K.dot(x * B_W[2], self.W_c) + self.b_c
x_o = K.dot(x * B_W[3], self.W_o) + self.b_o
else:
raise Exception('Unknown `consume_less` mode.')
i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
f = self.inner_activation(x_f + K.dot(h_tm1 * B_U[1], self.U_f))
c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1 * B_U[2], self.U_c))
o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[3], self.U_o))
h = o * self.activation(c)
return h, [h, c]
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.output_dim))
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.dropout_W < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, input_dim))
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
constants.append(B_W)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def get_config(self):
config = {'output_dim': self.output_dim,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'forget_bias_init': self.forget_bias_init.__name__,
'activation': self.activation.__name__,
'inner_activation': self.inner_activation.__name__,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'dropout_W': self.dropout_W,
'dropout_U': self.dropout_U}
base_config = super(LSTM, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
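# Illustrative sketch: a hypothetical model using the dropout_W / dropout_U
# arguments documented above (the dropout scheme of Gal & Ghahramani, cited in
# the references). Layer sizes, optimizer and loss are illustration values.
def _example_lstm_dropout():
    from keras.models import Sequential
    from keras.layers import Dense
    model = Sequential()
    model.add(LSTM(64, input_shape=(20, 128),
                   dropout_W=0.2, dropout_U=0.2))
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy')
    return model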
| {
"content_hash": "99c647a688ca3e67b73e32692f0119d8",
"timestamp": "",
"source": "github",
"line_count": 837,
"max_line_length": 130,
"avg_line_length": 46.81242532855436,
"alnum_prop": 0.5587769894339237,
"repo_name": "DeepGnosis/keras",
"id": "b41182deceb7ec26bab4ae07f3934287fc904bd9",
"size": "39208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras/layers/recurrent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "697"
},
{
"name": "Python",
"bytes": "960423"
}
],
"symlink_target": ""
} |
"""
Copyright 2012 Ali Ok (aliokATapacheDOTorg)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from trnltk.morphology.learner.controller.corpuscreatorcontroller import CorpusCreatorController
from trnltk.morphology.learner.requesthandler.sessionawarerequesthandler import SessionAwareRequestHandler
from trnltk.morphology.learner.ui import applicationcontext
from trnltk.morphology.learner.view.corpuscreatorview import CorpusCreatorView
from trnltk.tokenizer.texttokenizer import TextTokenizer
class CorpusCreatorHandler(SessionAwareRequestHandler):
def post(self):
param_corpus_name = self.request.get('corpusName')
param_corpus_desc = self.request.get('corpusDescription')
param_corpus_content = self.request.get('corpusContent')
assert param_corpus_name and param_corpus_desc and param_corpus_content
corpus_creator_view = CorpusCreatorView()
dbmanager = applicationcontext.application_context_instance.dbmanager
tokenizer = TextTokenizer()
self.controller = CorpusCreatorController(corpus_creator_view, dbmanager, tokenizer)
self.controller.create_corpus(param_corpus_name, param_corpus_desc, param_corpus_content)
view_context = corpus_creator_view.get_template_context()
self.render_response("corpuscreatortemplate.jinja2", **view_context) | {
"content_hash": "b37ae6eec78a33b3bbaaba318bfe7c71",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 106,
"avg_line_length": 44.609756097560975,
"alnum_prop": 0.7840349917987972,
"repo_name": "aliok/trnltk",
"id": "14ae90be94dcfcac0729c3708de2ef789d996bb2",
"size": "1829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trnltk/morphology/learner/requesthandler/corpuscreatorhandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "60232"
},
{
"name": "Python",
"bytes": "1320401"
},
{
"name": "Shell",
"bytes": "2191"
}
],
"symlink_target": ""
} |
import cgi
import hashlib
from core import database
from base64 import b64encode, b64decode
from os import urandom
class User:
""" Represents a user """
def __init__(self, userID, userName, userHash, userSalt):
self.userID = userID
self.userName = userName
self.userHash = userHash
self.userSalt = userSalt
def checkPassword(self, password):
hashed = hashlib.sha512(password+self.userSalt).hexdigest()
return self.userHash == hashed
def exists(userName):
""" Does user defined by name exist? """
assert userName is not None and userName.strip() != "", "Invalid Username"
return fromName(userName) is not None
def all():
""" Gets all users """
out = []
conn = database.connect()
curr = conn.cursor()
curr.execute(
"""
select user_id, user_name, user_hash, user_salt from users
""",
)
for rec in curr.fetchall():
out.append(User(rec[0], rec[1], rec[2], rec[3]))
curr.close()
return out
def fromName(userName):
""" Gets a user by name """
assert userName is not None and userName.strip() != "", "Invalid Username"
conn = database.connect()
curr = conn.cursor()
curr.execute(
"""
select user_id, user_name, user_hash, user_salt
from users where user_name = ?
""",
(userName,)
)
rec = curr.fetchone()
curr.close()
if rec is not None:
return User(rec[0], rec[1], rec[2], rec[3])
else:
return None
def add(userName, userPass):
""" Adds a new user """
assert userName is not None and userName.strip() != "", "Invalid Username"
assert userPass is not None, "Invalid Password"
assert fromName(userName) is None, "User already existed"
salt = b64encode(urandom(32))
hashed = hashlib.sha512(userPass+salt).hexdigest()
conn = database.connect()
curr = conn.cursor()
curr.execute(
"""
insert into users (user_name, user_hash, user_salt) values (?, ?, ?)
""",
(userName, hashed, salt)
)
conn.commit()
curr.close()
def remove(userID):
""" Given a user ID, deletes that user + message/room data from them """
conn = database.connect()
curr = conn.cursor()
curr.execute("""delete from messages where user_id = ?""", (userName, hashed, salt))
curr.execute("""delete from rooms where user_id = ?""", (userName, hashed, salt))
curr.execute("""delete from users where user_id = ?""", (userName, hashed, salt))
conn.commit()
curr.close()
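# Illustrative sketch: the salted-hash scheme used above stores
# sha512(password + salt) with a random base64 salt per user; verification
# recomputes the digest with the stored salt. The name and passwords below are
# illustration values and no database is touched.
def _example_password_roundtrip():
    salt = b64encode(urandom(32))
    hashed = hashlib.sha512("s3cret" + salt).hexdigest()
    user = User(1, "alice", hashed, salt)
    return user.checkPassword("s3cret"), user.checkPassword("wrong")   # (True, False)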
| {
"content_hash": "34cb93685e46f4b2e360783bdd80c187",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 88,
"avg_line_length": 26.151515151515152,
"alnum_prop": 0.6040942448821939,
"repo_name": "MatthewJWalls/Spiel",
"id": "387b7b012924a2d9703e4347519cd99c6d9c248f",
"size": "2590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Spiel/models/users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2018"
},
{
"name": "JavaScript",
"bytes": "1038"
},
{
"name": "Python",
"bytes": "33333"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
} |
import re
import logging
import sys
import urllib2 as u2
import kerberos as k
def getLogger():
log = logging.getLogger("http_kerberos_auth_handler")
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
return log
log = getLogger()
class AbstractKerberosAuthHandler:
"""auth handler for urllib2 that does Kerberos HTTP Negotiate Authentication
"""
def negotiate_value(self, headers):
"""checks for "Negotiate" in proper auth header
"""
authreq = headers.get(self.auth_header, None)
if authreq:
rx = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
mo = rx.search(authreq)
if mo:
return mo.group(1)
else:
log.debug("regex failed on: %s" % authreq)
else:
log.debug("%s header not found" % self.auth_header)
return None
def __init__(self):
self.retried = 0
self.context = None
def generate_request_header(self, req, headers, neg_value):
self.retried += 1
log.debug("retry count: %d" % self.retried)
host = req.get_host()
log.debug("req.get_host() returned %s" % host)
domain = host.rsplit(':', 1)[0]
#result, self.context = k.authGSSClientInit("HTTP@%s" % domain)
result, self.context = k.authGSSClientInit("HTTP@%s" % "hadoop")
if result < 1:
log.warning("authGSSClientInit returned result %d" % result)
return None
log.debug("authGSSClientInit() succeeded")
result = k.authGSSClientStep(self.context, neg_value)
if result < 0:
log.warning("authGSSClientStep returned result %d" % result)
return None
log.debug("authGSSClientStep() succeeded")
response = k.authGSSClientResponse(self.context)
log.debug("authGSSClientResponse() succeeded")
return "Negotiate %s" % response
def authenticate_server(self, headers):
neg_value = self.negotiate_value(headers)
if neg_value is None:
log.critical("mutual auth failed. No negotiate header")
return None
result = k.authGSSClientStep(self.context, neg_value)
if result < 1:
# this is a critical security warning
# should change to a raise --Tim
log.critical("mutual auth failed: authGSSClientStep returned result %d" % result)
pass
def clean_context(self):
if self.context is not None:
log.debug("cleaning context")
k.authGSSClientClean(self.context)
self.context = None
def http_error_auth_reqed(self, host, req, headers):
neg_value = self.negotiate_value(headers) #Check for auth_header
if neg_value is not None:
if not self.retried > 0:
return self.retry_http_kerberos_auth(req, headers, neg_value)
else:
return None
else:
self.retried = 0
def retry_http_kerberos_auth(self, req, headers, neg_value):
try:
neg_hdr = self.generate_request_header(req, headers, neg_value)
if neg_hdr is None:
log.debug("neg_hdr was None")
return None
req.add_unredirected_header(self.authz_header, neg_hdr)
resp = self.parent.open(req)
self.authenticate_server(resp.info())
return resp
except k.GSSError, e:
self.clean_context()
self.retried = 0
log.critical("GSSAPI Error: %s/%s" % (e[0][0], e[1][0]))
return None
self.clean_context()
self.retried = 0
class ProxyKerberosAuthHandler(u2.BaseHandler, AbstractKerberosAuthHandler):
"""Kerberos Negotiation handler for HTTP proxy auth
"""
authz_header = 'Proxy-Authorization'
auth_header = 'proxy-authenticate'
handler_order = 480 # before Digest auth
def http_error_407(self, req, fp, code, msg, headers):
log.debug("inside http_error_407")
host = req.get_host()
retry = self.http_error_auth_reqed(host, req, headers)
self.retried = 0
return retry
class HTTPKerberosAuthHandler(u2.BaseHandler, AbstractKerberosAuthHandler):
"""Kerberos Negotiation handler for HTTP auth
"""
authz_header = 'Authorization'
auth_header = 'www-authenticate'
handler_order = 480 # before Digest auth
def http_error_401(self, req, fp, code, msg, headers):
log.debug("inside http_error_401")
host = req.get_host()
retry = self.http_error_auth_reqed(host, req, headers)
self.retried = 0
return retry
def test():
log.setLevel(logging.DEBUG)
log.info("starting test")
opener = u2.build_opener()
opener.add_handler(HTTPKerberosAuthHandler())
resp = opener.open(sys.argv[1])
print dir(resp), resp.info(), resp.code
print resp.read()
if __name__ == '__main__':
test()
| {
"content_hash": "da1ccedffd25b1a3a6682ae8da22384f",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 93,
"avg_line_length": 29.832369942196532,
"alnum_prop": 0.5962022863786088,
"repo_name": "atupal/urllib2_kerberos",
"id": "0c362755c3aef38ec0719e90469f231dcf6b43e1",
"size": "5831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "urllib2_kerberos_hadoop.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15021"
}
],
"symlink_target": ""
} |