| code (stringlengths 1–25.8M) | language (stringclasses 18 values) | source (stringclasses 4 values) | repo (stringclasses 78 values) | path (stringlengths 0–268) |
|---|---|---|---|---|
#include <c10/util/Exception.h>
#include <utility>
namespace at {
/*
[collapse dims] Updates sizes and strides to reflect a "collapse" of
the info, possibly excluding the optional excludeDim. A "collapsed" version
of the info is the fewest dims that order the tensor's elements in the same
way as the original info. If excludeDim is specified, the collapse is the
fewest dims that order the tensor's elements as the original and preserve the
excluded dimension, unless the tensor collapses to a point.
This function returns a pair of values.
1) The (new) index of the preserved dimension if excludeDim is
specified. 0 if the tensor is collapsed to a point. -1
otherwise.
2) The new number of dimensions.
*/
template <typename T>
inline std::pair<int64_t, int64_t> collapse_dims(
T* sizes,
T* strides,
int64_t dims,
const int excludeDim = -1) {
TORCH_CHECK(
excludeDim >= -1 && excludeDim < dims,
"expected excluded dim between -1 and dims - 1");
int64_t stopDim = (excludeDim == -1) ? dims : excludeDim;
int64_t newIndex = -1;
int64_t oldIndex = 0;
int64_t remappedExcludedDim = -1;
while (oldIndex < dims) {
// Finds a dimension to collapse into
for (; oldIndex < stopDim; ++oldIndex) {
if (sizes[oldIndex] == 1) {
continue;
}
++newIndex;
sizes[newIndex] = sizes[oldIndex];
strides[newIndex] = strides[oldIndex];
++oldIndex;
break;
}
// Collapses dims
for (; oldIndex < stopDim; ++oldIndex) {
if (sizes[oldIndex] == 1) {
continue;
}
if (strides[newIndex] == sizes[oldIndex] * strides[oldIndex]) {
sizes[newIndex] *= sizes[oldIndex];
strides[newIndex] = strides[oldIndex];
} else {
++newIndex;
sizes[newIndex] = sizes[oldIndex];
strides[newIndex] = strides[oldIndex];
}
}
// Handles excludeDim being set (oldIndex == excludeDim)
if (oldIndex != dims) {
// Preserves excluded dimension
++newIndex;
sizes[newIndex] = sizes[oldIndex];
strides[newIndex] = strides[oldIndex];
remappedExcludedDim = newIndex;
// Restarts iteration after excludeDim
++oldIndex;
stopDim = dims;
}
}
// Handles special case of all dims size 1
if (newIndex == -1 || (newIndex == 0 && sizes[0] == 1)) {
dims = 1;
sizes[0] = 1;
strides[0] = 1;
return std::pair<int64_t, int64_t>(0, 1);
}
dims = newIndex + 1;
return std::pair<int64_t, int64_t>(remappedExcludedDim, dims);
}
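// Illustrative example (not part of the original header): for a contiguous
// tensor with sizes {2, 3, 4, 5} and strides {60, 20, 5, 1},
// collapse_dims(sizes, strides, 4) rewrites them to sizes {120}, strides {1}
// and returns {-1, 1}. With excludeDim = 2, the first two dims merge while
// dim 2 is preserved: sizes become {6, 4, 5}, strides {20, 5, 1}, and the
// function returns {1, 3} (the excluded dim now sits at index 1).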
} // namespace at
| c | github | https://github.com/pytorch/pytorch | aten/src/ATen/CollapseDims.h |
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_GPU_CU_H_
#define TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_GPU_CU_H_
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/kernels/gpu_prim.h"
#include "tensorflow/core/kernels/gpu_prim_helpers.h"
#include "tensorflow/core/kernels/segment_reduction_ops.h"
#include "tensorflow/core/lib/core/bits.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/gpu_device_functions.h"
#include "tensorflow/core/util/gpu_kernel_helper.h"
#include "tensorflow/core/util/gpu_solvers.h" // For ScratchSpace
#include "tensorflow/core/util/permutation_input_iterator.h"
#if (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#include "tensorflow/core/platform/rocm.h"
#endif
namespace tensorflow {
using GPUDevice = Eigen::GpuDevice;
// Atomic and non-atomic reduction functors for the GPU.
#define DEFINE_REDUCE_UPDATE_OP_GPU(name, func) \
struct name##OpGpu { \
template <typename T> \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(T* dest, \
const T& value) { \
func; \
} \
};
DEFINE_REDUCE_UPDATE_OP_GPU(AtomicSum, GpuAtomicAdd(dest, value))
DEFINE_REDUCE_UPDATE_OP_GPU(AtomicProd, GpuAtomicMul(dest, value))
DEFINE_REDUCE_UPDATE_OP_GPU(AtomicMax, GpuAtomicMax(dest, value))
DEFINE_REDUCE_UPDATE_OP_GPU(AtomicMin, GpuAtomicMin(dest, value))
DEFINE_REDUCE_UPDATE_OP_GPU(NonAtomicSum, *dest += value)
DEFINE_REDUCE_UPDATE_OP_GPU(NonAtomicProd, *dest *= value)
DEFINE_REDUCE_UPDATE_OP_GPU(NonAtomicMax, *dest = max(*dest, value))
DEFINE_REDUCE_UPDATE_OP_GPU(NonAtomicMin, *dest = min(*dest, value))
#undef DEFINE_REDUCE_UPDATE_OP_GPU
template <typename ReduceOp>
struct ReduceUpdateOpFor {};
#define DEFINE_REDUCE_UPDATE_OP_FOR(reduce_op, atomic, nonatomic) \
template <> \
struct ReduceUpdateOpFor<reduce_op> { \
using atomic_op = atomic; \
using nonatomic_op = nonatomic; \
};
DEFINE_REDUCE_UPDATE_OP_FOR(functor::Sum, AtomicSumOpGpu, NonAtomicSumOpGpu)
DEFINE_REDUCE_UPDATE_OP_FOR(functor::Prod, AtomicProdOpGpu, NonAtomicProdOpGpu)
DEFINE_REDUCE_UPDATE_OP_FOR(functor::Max, AtomicMaxOpGpu, NonAtomicMaxOpGpu)
DEFINE_REDUCE_UPDATE_OP_FOR(functor::Min, AtomicMinOpGpu, NonAtomicMinOpGpu)
#undef DEFINE_REDUCE_UPDATE_OP_FOR
// PR#61339: MSVC does not support compound-assignment operators on device
// SortedSegmentReductionCustomKernel reduces input data just as
// UnsortedSegmentCustomKernel does, except that the input data
// is partitioned along the outer reduction dimension. This is
// because consecutive rows (elements in a row share the same
// outer dimension index) in the flattened 2D input data likely
// belong to the same segment in a sorted segment reduction.
// Such a partitioning strategy therefore has two advantages over
// the unsorted kernel:
// 1. Each thread reduces across multiple rows before writing
// its result to global memory, so reduction results are written
// to global memory less often.
// 2. We may know that the current thread is the only contributor
// to an output element because of the increasing nature of segment
// ids. In such cases, we do not need to use atomic operations
// to write results to global memory.
// In the flattened view of input data (with only outer and inner
// dimension), every thread processes a strip of input data of
// size OuterDimTileSize x 1. This strip runs across multiple
// rows of input data and all reduction elements share one inner
// dimension index.
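// Illustrative example (values chosen for exposition): with
// OuterDimTileSize = 8, inner_dim_size = 4 and input_outer_dim_size = 20,
// total_stripe_count = ceil(20 / 8) * 4 = 12. Stripe index 5 then covers
// segment_offset = 5 % 4 = 1 and rows 8..15 (base = 5 / 4 * 8 = 8,
// actual_stripe_height = min(8, 20 - 8) = 8), i.e. it reduces column 1 of
// those 8 consecutive rows before touching global memory.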
template <typename T, typename Index, int OuterDimTileSize, typename ReductionF,
typename AtomicReductionF>
__global__ void SortedSegmentReductionCustomKernel(
const Index input_outer_dim_size, const Index inner_dim_size,
const Index output_outer_dim_size, const Index* __restrict__ segment_ids,
const T* __restrict__ input, T* __restrict__ output,
const Index total_stripe_count, const T initial_value) {
for (int stripe_index : GpuGridRangeX(total_stripe_count)) {
const Index segment_offset = stripe_index % inner_dim_size;
const Index input_outer_dim_index_base =
stripe_index / inner_dim_size * Index(OuterDimTileSize);
T reduce_res = initial_value;
Index first_segment_id = segment_ids[input_outer_dim_index_base];
Index last_output_segment_id = output_outer_dim_size;
const Index actual_stripe_height =
min(Index(OuterDimTileSize),
input_outer_dim_size - input_outer_dim_index_base);
for (Index j = 0; j < actual_stripe_height; j++) {
Index current_output_segment_id =
segment_ids[input_outer_dim_index_base + j];
// Decide whether to write result to global memory. Result is only written
// to global memory if we move to another segment. Otherwise we can keep
// accumulating locally.
if (current_output_segment_id > last_output_segment_id) {
const Index output_index =
last_output_segment_id * inner_dim_size + segment_offset;
// Decide whether to write result to global memory using atomic
// operations.
if (last_output_segment_id == first_segment_id) {
AtomicReductionF()(output + output_index, reduce_res);
} else {
ReductionF()(output + output_index, reduce_res);
}
reduce_res = initial_value;
}
ReductionF()(
&reduce_res,
ldg(input + (input_outer_dim_index_base + j) * inner_dim_size +
segment_offset));
last_output_segment_id = current_output_segment_id;
}
// For the last result in a strip, always write using atomic operations
// due to possible race conditions with threads computing
// the following strip.
const Index output_index =
last_output_segment_id * inner_dim_size + segment_offset;
AtomicReductionF()(output + output_index, reduce_res);
}
}
template <typename SegmentId, typename Index, typename T>
__global__ void SegmentMeanNormalizeKernel(
SegmentId nsegments, Index ninner,
const Index* __restrict__ segment_offsets, // [nsegments + 1]
T* __restrict__ output) { // [nsegments, ninner]
for (SegmentId seg : GpuGridRangeY(nsegments)) {
SegmentId segment_size = segment_offsets[seg + 1] - segment_offsets[seg];
segment_size = max(segment_size, Index(1)); // Avoid division by zero
T inv_norm = T(1) / static_cast<T>(segment_size);
for (Index i : GpuGridRangeX(ninner)) {
output[seg * ninner + i] *= inv_norm;
}
}
}
template <typename SegmentId, typename Index, typename T>
absl::Status LaunchSegmentMeanNormalizeKernel(
const GPUDevice& d, SegmentId nsegments, Index ninner,
const Index* __restrict__ segment_offsets, // [nsegments + 1]
T* __restrict__ output) { // [nsegments, ninner]
Gpu2DLaunchConfig config = GetGpu2DLaunchConfig(
ninner, nsegments, d, SegmentMeanNormalizeKernel<SegmentId, Index, T>,
/*dynamic_shared_memory_size=*/0, /*block_size_limit=*/0);
return GpuLaunchKernel(SegmentMeanNormalizeKernel<SegmentId, Index, T>,
config.block_count, config.thread_per_block, 0,
d.stream(), nsegments, ninner, segment_offsets,
output);
}
template <typename SegmentId, typename Index, typename T>
__global__ void SegmentSetEmptyKernel(
SegmentId nsegments, Index ninner,
const Index* __restrict__ segment_offsets, // [nsegments + 1]
const T empty_value,
T* __restrict__ output) { // [nsegments, ninner]
for (SegmentId seg : GpuGridRangeY(nsegments)) {
SegmentId segment_size = segment_offsets[seg + 1] - segment_offsets[seg];
if (segment_size == 0) {
for (Index i : GpuGridRangeX(ninner)) {
output[seg * ninner + i] = empty_value;
}
}
}
}
template <typename SegmentId, typename Index, typename T>
absl::Status LaunchSegmentSetEmptyKernel(
const GPUDevice& d, SegmentId nsegments, Index ninner,
const Index* __restrict__ segment_offsets, // [nsegments + 1]
const T empty_value,
T* __restrict__ output) { // [nsegments, ninner]
Gpu2DLaunchConfig config = GetGpu2DLaunchConfig(
ninner, nsegments, d, SegmentSetEmptyKernel<SegmentId, Index, T>,
/*dynamic_shared_memory_size=*/0, /*block_size_limit=*/0);
return GpuLaunchKernel(SegmentSetEmptyKernel<SegmentId, Index, T>,
config.block_count, config.thread_per_block, 0,
d.stream(), nsegments, ninner, segment_offsets,
empty_value, output);
}
// UnsortedSegmentCustomKernel processes 'input_total_size' elements.
// Each element is mapped from input to output by a combination of its
// 'segment_ids' mapping and 'inner_dim_size'.
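// Illustrative example (values chosen for exposition): with
// inner_dim_size = 3 and segment_ids = [2, 0, 2], input element
// input[1 * 3 + 1] (input_index = 4) maps to output[0 * 3 + 1] because
// segment_ids[1] == 0, while rows 0 and 2 both accumulate atomically into
// output row 2. Elements whose segment id falls outside
// [0, output_outer_dim_size) are skipped.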
template <typename T, typename Index, typename KernelReductionFunctor>
__global__ void UnsortedSegmentCustomKernel(
const int64_t input_outer_dim_size, const int64_t inner_dim_size,
const int64_t output_outer_dim_size, const Index* __restrict__ segment_ids,
const T* __restrict__ input, T* __restrict__ output) {
const int64_t input_total_size = input_outer_dim_size * inner_dim_size;
for (int64_t input_index : GpuGridRangeX(input_total_size)) {
const int64_t input_segment_index = input_index / inner_dim_size;
const int64_t segment_offset = input_index % inner_dim_size;
const Index output_segment_index = segment_ids[input_segment_index];
if (output_segment_index < 0 ||
output_segment_index >= output_outer_dim_size) {
continue;
}
const int64_t output_index =
output_segment_index * inner_dim_size + segment_offset;
KernelReductionFunctor()(output + output_index, ldg(input + input_index));
}
}
template <typename Toffsets, typename Tsegmentids>
__global__ void SegmentOffsetsKernel(
Toffsets size, Tsegmentids nsegments,
const Tsegmentids* __restrict__ segment_ids, // [size]
Toffsets* __restrict__ segment_offsets) { // [nsegments + 1]
GPU_1D_KERNEL_LOOP(i, size + 1) {
// IDs are clipped to [-1, nsegments] so that out-of-bounds IDs are ignored.
// Note that we can't report invalid IDs from the GPU without incurring
// additional overhead.
auto clip = [&](Tsegmentids id) {
return min(max(Tsegmentids(-1), id), nsegments);
};
const Tsegmentids cur_id = (i < size) ? clip(segment_ids[i]) : nsegments;
const Tsegmentids prev_id =
(i == 0) ? Tsegmentids(-1) : clip(segment_ids[i - 1]);
// At segment boundaries, write the offset for this ID and any missing IDs
// since the previous one.
for (Tsegmentids id = prev_id + 1; id <= cur_id; ++id) {
segment_offsets[id] = i;
}
}
}
// Finds the start offset of each segment in the given sorted segment_ids
// vector. Missing IDs are given the same offset as the next ID so that they
// represent empty ranges. Invalid IDs (those that are outside the range
// [0, nsegments)) are ignored. The value at segment_offsets[0] is set to the
// start index of the first valid ID (e.g., 0 if all IDs are valid), and the
// value at segment_offsets[nsegments] is set to the end index of the last valid
// ID (e.g., nsegments if all IDs are valid).
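// Illustrative example (values chosen for exposition): for sorted
// segment_ids = [0, 0, 2, 2] with size = 4 and nsegments = 4, the kernel
// writes segment_offsets = [0, 2, 2, 4, 4]. Segments 1 and 3 are empty and
// receive the same offset as the following segment, so their ranges
// [offsets[i], offsets[i + 1]) are empty.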
template <typename Toffsets, typename Tsegmentids>
absl::Status LaunchSegmentOffsetsKernel(
const GPUDevice& d, Toffsets size, Tsegmentids nsegments,
const Tsegmentids* segment_ids, // [size]
Toffsets* segment_offsets) { // [nsegments + 1]
GpuLaunchConfig config = GetGpuLaunchConfig(
size + 1, d, &SegmentOffsetsKernel<Toffsets, Tsegmentids>,
/*dynamic_shared_memory_size=*/0, /*block_size_limit=*/0);
return GpuLaunchKernel(SegmentOffsetsKernel<Toffsets, Tsegmentids>,
config.block_count, config.thread_per_block, 0,
d.stream(), size, nsegments, segment_ids,
segment_offsets);
}
template <typename T>
struct RealTypeIfComplex {
using type = T;
};
template <typename Real>
struct RealTypeIfComplex<std::complex<Real>> {
using type = Real;
};
// Reduces along columns of the thread block, returning the result in the first
// row of threads.
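// Illustrative example (assuming blockDim.y = 4, which the power-of-two
// block shapes chosen in LaunchSegmentReduceVectorKernel below satisfy):
// in the first pass (k = 2) every valid row stores its value to shared
// memory and rows 0..1 combine with rows 2..3; in the second pass (k = 1)
// row 0 combines with row 1, leaving the column reduction in row 0. The
// halving loop assumes blockDim.y is a power of two.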
template <typename T, typename ReduceOp>
__device__ T ReduceBlockAlongCols(ReduceOp reduce_op, const T& value,
bool is_valid) {
GPU_DYNAMIC_SHARED_MEM_DECL(/*ALIGN=*/16, char, shared_memory_raw);
T* const shared_partial_reduction =
reinterpret_cast<T*>(shared_memory_raw); // [blockDim.y, blockDim.x]
const int x = threadIdx.x;
const int y = threadIdx.y;
T reduced = value;
// Reduce over the y dimension of the block.
for (unsigned k = blockDim.y / 2; k > 0; k /= 2) {
if (is_valid && y < 2 * k) {
shared_partial_reduction[y * blockDim.x + x] = reduced;
}
__syncthreads();
if (is_valid && y < k) {
reduced = reduce_op(reduced,
shared_partial_reduction[(y + k) * blockDim.x + x]);
}
__syncthreads();
}
return reduced;
}
// This kernel uses a 2D thread decomposition. The x dimension maps to the inner
// dimension of the input/output. The y grid dimension maps to segments, and y
// threads within a block cooperate to reduce over the block's segment.
// Note that Tinit is needed because Tvec and Treducevec may be vector types,
// but Tinit is always a scalar type.
// Note that the first dimension of input_vec is nouter if indices is not
// provided; otherwise it is indexed indirectly via indices and can have any
// size (as long as it spans at least the maximum value in indices). This also
// applies to the weights vector.
template <typename Treducevec, typename Tvec, typename Toffsets,
typename Tindices, typename Tsegmentids, typename ReduceOp,
typename Tinit, typename Tweights>
__global__ void SegmentReduceVectorKernel(
Toffsets nouter, Toffsets ninner_vec, Tsegmentids nsegments,
ReduceOp reduce_op, Tinit initial_value, Tinit empty_segment_value,
bool is_mean, bool is_sqrtn,
const Tvec* __restrict__ input_vec, // [nouter or any, ninner_vec]
const Toffsets* __restrict__ segment_offsets, // [nsegments + 1]
const Tindices* __restrict__ indices, // [nouter] (optional)
const Tweights* __restrict__ weights, // [nouter or any] (optional)
Tvec* __restrict__ output_vec) { // [nsegments, ninner_vec]
const int num_blocks_x = (ninner_vec - 1) / blockDim.x + 1;
// Grid-stride loop over inner dimension blocks.
for (Toffsets blk_x = blockIdx.x; blk_x < num_blocks_x; blk_x += gridDim.x) {
const Toffsets x = threadIdx.x + blk_x * blockDim.x;
const Toffsets y = threadIdx.y;
const bool x_ok = x < ninner_vec;
// Grid-stride loop over segment blocks, each processing one segment.
for (Tsegmentids seg = blockIdx.y; seg < nsegments; seg += gridDim.y) {
// Load segment range.
const Toffsets begin = segment_offsets[seg];
const Toffsets end = segment_offsets[seg + 1];
// Reduce over the segment.
Treducevec result = Treducevec(initial_value);
// Loop over the segment, reducing blockDim.y elements at a time.
for (Toffsets y_offset = begin; y_offset < end; y_offset += blockDim.y) {
const bool y_ok = (y_offset + y) < end;
// Perform indirect lookup if required.
const Toffsets y_idx =
indices && y_ok ? indices[y_offset + y] : y_offset + y;
const int64_t input_idx = static_cast<int64_t>(y_idx) * ninner_vec + x;
// Load the input row from global mem.
Treducevec block_result =
x_ok && y_ok ? input_vec[input_idx] : Tvec(initial_value);
// Apply weights if provided.
if (weights && y_ok) block_result = block_result * Tvec(weights[y_idx]);
// Reduce along the columns of the block, returning result in first row.
block_result = ReduceBlockAlongCols(reduce_op, block_result, x_ok);
if (y == 0 && x_ok) {
result = reduce_op(result, block_result);
}
}
// First row of the block stores the result to global memory.
if (y == 0 && x_ok) {
if (begin == end) {
// Empty segment.
result = Treducevec(empty_segment_value);
} else {
Tweights total_weight(end - begin);
// Normalize the results if necessary.
if (is_mean) {
result = result / Treducevec(total_weight);
} else if (is_sqrtn) {
result =
result / Treducevec(sqrt(static_cast<double>(total_weight)));
}
}
// Cast from Treducevec to Tvec.
const int64_t output_idx = static_cast<int64_t>(seg) * ninner_vec + x;
output_vec[output_idx] = static_cast<Tvec>(result);
}
}
}
}
// Reduces input matrix within segments over the outer dimension. Empty segments
// always output empty_segment_value.
// If is_mean or is_sqrtn is true, the results are normalized using the
// corresponding function.
// If indices is not nullptr, input rows are accessed indirectly as
// input[indices[i]], instead of input[i].
// Note: Treducevec is to allow reducing in higher precision than Tvec.
template <typename Treducevec, typename Tvec, typename Toffsets,
typename Tindices, typename Tsegmentids, typename ReduceOp,
typename Tinit, typename Tweights>
absl::Status LaunchSegmentReduceVectorKernel(
const GPUDevice& d, Toffsets nouter, Toffsets ninner_vec,
Tsegmentids nsegments, ReduceOp reduce_op, Tinit initial_value,
Tinit empty_segment_value, bool is_mean, bool is_sqrtn,
const Tvec* input_vec, // [nouter or any, ninner_vec]
const Toffsets* segment_offsets, // [nsegments + 1]
const Tindices* indices, // [nouter] (optional)
const Tweights* weights, // [nouter or any] (optional)
Tvec* output_vec) { // [nsegments, ninner_vec]
static constexpr const int kMaxGridX = (1u << 31) - 1;
static constexpr const int kMaxGridY = (1u << 16) - 1;
const int max_block_size = 1024; // Can be tuned for perf (<= 1024)
const int min_block_size = 64; // Can be tuned for perf
const Toffsets ninner_pow2 = Toffsets(1) << Log2Ceiling64(ninner_vec);
// This is a heuristic that first allocates threads in the block to the inner
// (x) dimension (which is most efficient) and then allocates the rest to the
// reduction (y) dimension (which is less efficient but increases
// parallelism).
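// Illustrative example (values chosen for exposition): with ninner_vec = 8,
// nouter = 6400 and nsegments = 64, ninner_pow2 = 8, so block_x = 8 and
// block.y = min(divup(64, 8), 128) = 8, i.e. 8 threads cover the inner
// dimension and 8 threads cooperate on each segment reduction.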
int block_x = std::min(ninner_pow2, static_cast<Toffsets>(max_block_size));
const Toffsets avg_reduce_size =
Eigen::divup(nouter, static_cast<Toffsets>(nsegments));
const Toffsets avg_reduce_size_pow2 = Toffsets(1)
<< Log2Ceiling64(avg_reduce_size);
dim3 block(
block_x,
std::min(static_cast<Toffsets>(Eigen::divup(min_block_size, block_x)),
avg_reduce_size_pow2));
dim3 grid(std::min(Eigen::divup(ninner_vec, static_cast<Toffsets>(block.x)),
static_cast<Toffsets>(kMaxGridX)),
std::min(nsegments, static_cast<Tsegmentids>(kMaxGridY)));
unsigned shared_memory_bytes = block.x * block.y * sizeof(Treducevec);
return GpuLaunchKernel(
SegmentReduceVectorKernel<Treducevec, Tvec, Toffsets, Tindices,
Tsegmentids, ReduceOp, Tinit, Tweights>,
grid, block, shared_memory_bytes, d.stream(), nouter, ninner_vec,
nsegments, reduce_op, initial_value, empty_segment_value, is_mean,
is_sqrtn, input_vec, segment_offsets, indices, weights, output_vec);
}
template <typename Tvec, typename Treducevec, typename Toffsets,
typename Tsegmentids, typename Tinit>
__global__ void SegmentReduceEpilogueKernel(
Tsegmentids nsegments, Tinit empty_segment_value, bool is_mean,
bool is_sqrtn,
const Treducevec* __restrict__ output_raw, // [nsegments]
const Toffsets* __restrict__ segment_offsets, // [nsegments + 1]
Tvec* __restrict__ output) { // [nsegments]
GPU_1D_KERNEL_LOOP(seg, nsegments) {
Toffsets segment_size = segment_offsets[seg + 1] - segment_offsets[seg];
Treducevec val = output_raw[seg];
if (segment_size == 0) {
// Empty segment.
val = Treducevec(empty_segment_value);
} else if (is_mean) {
val = val / Treducevec(segment_size);
} else if (is_sqrtn) {
val = val / Treducevec(sqrt(static_cast<double>(
typename RealTypeIfComplex<Tinit>::type(segment_size))));
}
// Cast from Treducevec to Tvec.
output[seg] = static_cast<Tvec>(val);
}
}
// Normalizes output_raw based on segment size and casts from Treducevec to
// Tvec. If Tvec == Treducevec, this is safe to call with output_raw == output.
// Note that Treducevec is the type that was used for the reduction, which may
// be a higher-precision type than the output type Tvec (e.g., float vs. half).
template <typename Tvec, typename Treducevec, typename Toffsets,
typename Tsegmentids, typename Tinit>
absl::Status LaunchSegmentReduceEpilogueKernel(
const GPUDevice& d, Tsegmentids nsegments, Tinit empty_segment_value,
bool is_mean, bool is_sqrtn,
const Treducevec* output_raw, // [nsegments]
const Toffsets* segment_offsets, // [nsegments + 1]
Tvec* output) { // [nsegments]
GpuLaunchConfig config = GetGpuLaunchConfig(
nsegments, d,
&SegmentReduceEpilogueKernel<Tvec, Treducevec, Toffsets, Tsegmentids,
Tinit>,
/*dynamic_shared_memory_size=*/0, /*block_size_limit=*/0);
return GpuLaunchKernel(SegmentReduceEpilogueKernel<Tvec, Treducevec, Toffsets,
Tsegmentids, Tinit>,
config.block_count, config.thread_per_block, 0,
d.stream(), nsegments, empty_segment_value, is_mean,
is_sqrtn, output_raw, segment_offsets, output);
}
template <typename Tto>
struct CastFunctor {
template <typename T>
__device__ Tto operator()(const T& val) const {
return static_cast<Tto>(val);
}
};
template <typename Treducevec, typename Tvec, typename Tindices,
typename Tweights>
struct LookupAndScaleAndCastInputsFunctor {
LookupAndScaleAndCastInputsFunctor(const Tvec* input_vec,
const Tindices* indices,
const Tweights* weights)
: input_vec_(input_vec), indices_(indices), weights_(weights) {}
template <typename Toffsets>
__device__ Treducevec operator()(Toffsets idx) const {
if (indices_) idx = indices_[idx];
Treducevec result = static_cast<Treducevec>(input_vec_[idx]);
if (weights_) result = result * Tvec(weights_[idx]);
return result;
}
private:
const Tvec* __restrict__ input_vec_;
const Tindices* __restrict__ indices_;
const Tweights* __restrict__ weights_;
};
template <typename Treducevec, typename Tvec, typename Toffsets,
typename Tindices, typename Tweights>
struct CastIterator {
using FunctorTy =
LookupAndScaleAndCastInputsFunctor<Treducevec, Tvec, Tindices, Tweights>;
using InputIteratorTy = gpuprim::CountingInputIterator<Toffsets>;
using IteratorTy =
gpuprim::TransformInputIterator<Treducevec, FunctorTy, InputIteratorTy>;
};
template <typename Treducevec, typename Toffsets, typename Tvec,
typename Tindices, typename Tweights>
typename CastIterator<Treducevec, Tvec, Toffsets, Tindices,
Tweights>::IteratorTy
MakeLookupAndScaleAndCastInputsIterator(const Tvec* input_vec,
const Tindices* indices,
const Tweights* weights) {
using CastIteratorTy =
CastIterator<Treducevec, Tvec, Toffsets, Tindices, Tweights>;
typename CastIteratorTy::FunctorTy functor(input_vec, indices, weights);
return typename CastIteratorTy::IteratorTy(
typename CastIteratorTy::InputIteratorTy(Toffsets(0)), functor);
}
template <typename Treducevec, typename Tvec, typename Toffsets,
typename Tindices, typename Tsegmentids, typename ReduceOp,
typename Tinit, typename Tweights>
absl::Status SegmentReduceGPUImplNoInnerDim(
OpKernelContext* ctx, Toffsets nouter, Tsegmentids nsegments,
ReduceOp reduce_op, Tinit initial_value, Tinit empty_segment_value,
bool is_mean, bool is_sqrtn,
const Tvec* input_vec, // [nouter or any]
const Toffsets* segment_offsets, // [nsegments + 1]
const Tindices* indices, // [nouter] (optional)
const Tweights* weights, // [nouter or any] (optional)
Tvec* output_vec) { // [nsegments]
// Here we use gpuprim::DeviceSegmentedReduce (which is optimized for this
// shape) and add the additional required functionality using fancy input
// iterators and an epilogue kernel.
// Note: This reinterpret cast is only needed to avoid compilation error
// when Tvec != Treducevec; the result is only used if Tvec == Treducevec.
Treducevec* output_raw_ptr = reinterpret_cast<Treducevec*>(output_vec);
Tensor output_raw;
bool need_temp_output = !std::is_same<Tvec, Treducevec>::value;
if (need_temp_output) {
// Note: We must allocate and reinterpret as bytes because Treducevec may
// be a vector type and they are not supported as Tensor dtypes.
TF_RETURN_IF_ERROR(ctx->allocate_temp(
DT_INT8,
TensorShape({static_cast<int64_t>(nsegments * sizeof(Treducevec))}),
&output_raw));
output_raw_ptr =
reinterpret_cast<Treducevec*>(output_raw.flat<int8_t>().data());
}
auto input_iter =
MakeLookupAndScaleAndCastInputsIterator<Treducevec, Toffsets>(
input_vec, indices, weights);
TF_RETURN_IF_ERROR(GpuSegmentedReduce(ctx, nsegments, reduce_op,
Treducevec(initial_value), input_iter,
segment_offsets, output_raw_ptr));
bool need_epilogue = !std::is_same<Tvec, Treducevec>::value ||
initial_value != empty_segment_value || is_mean ||
is_sqrtn;
if (need_epilogue) {
const GPUDevice& device = ctx->eigen_gpu_device();
// Normalize based on the segment size and cast results back to T.
TF_RETURN_IF_ERROR(LaunchSegmentReduceEpilogueKernel(
device, nsegments, empty_segment_value, is_mean, is_sqrtn,
output_raw_ptr, segment_offsets, output_vec));
}
return absl::OkStatus();
}
template <typename Treducevec, typename Tvec, typename Toffsets,
typename Tindices, typename Tsegmentids, typename ReduceOp,
typename Tinit, typename Tweights>
absl::Status SegmentReduceGPUImpl(
OpKernelContext* ctx, Toffsets nouter, Toffsets ninner_vec,
Tsegmentids nsegments, ReduceOp reduce_op, Tinit initial_value,
Tinit empty_segment_value, bool is_mean, bool is_sqrtn,
const Tvec* input_vec, // [nouter or any, ninner_vec]
const Tsegmentids* segment_ids, // [nouter]
const Tindices* indices, // [nouter] (optional)
const Tweights* weights, // [nouter or any] (optional)
Tvec* output_vec) { // [nsegments, ninner_vec]
const GPUDevice& device = ctx->eigen_gpu_device();
if (nouter == 0) {
// Just set output to empty_segment_value.
GPUDevice d = ctx->template eigen_device<GPUDevice>();
int64_t output_size = static_cast<int64_t>(nsegments) * ninner_vec;
GpuLaunchConfig config = GetGpuLaunchConfig(output_size, d);
return GpuLaunchKernel(SetToValue<Tvec, Tinit>, config.block_count,
config.thread_per_block, 0, d.stream(), output_size,
output_vec, empty_segment_value);
}
// Allocate and compute segment_offsets.
Tensor segment_offsets;
TF_RETURN_IF_ERROR(ctx->allocate_temp(DataTypeToEnum<Toffsets>::value,
TensorShape({nsegments + 1}),
&segment_offsets));
Toffsets* segment_offsets_ptr = segment_offsets.flat<Toffsets>().data();
TF_RETURN_IF_ERROR(LaunchSegmentOffsetsKernel(
device, nouter, nsegments, segment_ids, segment_offsets_ptr));
const Toffsets avg_reduce_size =
Eigen::divup(nouter, static_cast<Toffsets>(nsegments));
// This avg_reduce_size threshold is a performance heuristic.
if (ninner_vec == 1 && avg_reduce_size >= 512) {
// Here we use a gpuprim-based implementation that doesn't support an
// inner dimension but can be significantly faster for large reductions.
return SegmentReduceGPUImplNoInnerDim<Treducevec>(
ctx, nouter, nsegments, reduce_op, initial_value, empty_segment_value,
is_mean, is_sqrtn, input_vec, segment_offsets_ptr, indices, weights,
output_vec);
}
// Here we use a custom kernel that is optimized for ninner_vec >= ~64 and
// gives decent performance for smaller cases. It also handles indices,
// casting to/from Treducevec, and normalizing the output.
return LaunchSegmentReduceVectorKernel<Treducevec>(
device, nouter, ninner_vec, nsegments, reduce_op, initial_value,
empty_segment_value, is_mean, is_sqrtn, input_vec, segment_offsets_ptr,
indices, weights, output_vec);
}
template <typename Treduce>
struct SegmentReduceGPUVectorized {
template <int vec_size>
struct Impl {
template <typename T, typename Toffsets, typename Tindices,
typename Tsegmentids, typename ReduceOp, typename Tweights>
absl::Status operator()(OpKernelContext* ctx, Toffsets nouter,
Toffsets ninner, Tsegmentids nsegments,
ReduceOp reduce_op, T initial_value,
T empty_segment_value, bool is_mean, bool is_sqrtn,
const T* input, const Tsegmentids* segment_ids,
const Tindices* indices, const Tweights* weights,
T* output) {
DCHECK_EQ(ninner % vec_size, 0);
DCHECK_EQ(reinterpret_cast<std::uintptr_t>(input) % vec_size, 0);
DCHECK_EQ(reinterpret_cast<std::uintptr_t>(output) % vec_size, 0);
Toffsets ninner_vec = ninner / vec_size;
using Tvec = AlignedVector<T, vec_size>;
using Treducevec = AlignedVector<Treduce, vec_size>;
const Tvec* input_vec = reinterpret_cast<const Tvec*>(input);
Tvec* output_vec = reinterpret_cast<Tvec*>(output);
return SegmentReduceGPUImpl<Treducevec>(
ctx, nouter, ninner_vec, nsegments, reduce_op, initial_value,
empty_segment_value, is_mean, is_sqrtn, input_vec, segment_ids,
indices, weights, output_vec);
}
};
};
// Reduces input matrix within segments over the outer dimension. Empty segments
// always output empty_segment_value.
// The segment_ids vector must be sorted.
// If is_mean or is_sqrtn is true, the results are normalized using the
// corresponding function.
// If indices is not nullptr, input rows are accessed indirectly as
// input[indices[i]], instead of input[i].
// The implementation is deterministic.
// Note: Treduce is to allow reducing in higher precision than T.
template <typename Treduce, typename T, typename Toffsets, typename Tindices,
typename Tsegmentids, typename ReduceOp, typename Tweights>
absl::Status SegmentReduceGPU(
OpKernelContext* ctx, Toffsets nouter, Toffsets ninner,
Tsegmentids nsegments, ReduceOp reduce_op, T initial_value,
T empty_segment_value, bool is_mean, bool is_sqrtn,
const T* input, // [nouter or any, ninner]
const Tsegmentids* segment_ids, // [nouter]
const Tindices* indices, // [nouter] (optional)
const Tweights* weights, // [nouter or any] (optional)
T* output) { // [nsegments, ninner]
if (ninner == 0 || nsegments == 0) return absl::OkStatus();
return DispatchToVectorized<
T, SegmentReduceGPUVectorized<Treduce>::template Impl>(
MinAlignmentOf(input, output, ninner), ctx, nouter, ninner, nsegments,
reduce_op, initial_value, empty_segment_value, is_mean, is_sqrtn, input,
segment_ids, indices, weights, output);
}
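// Illustrative call (hypothetical values, for exposition only): a sorted
// segment sum over a [6, 4] float input with segment_ids = [0, 0, 1, 1, 1, 3]
// and nsegments = 4 could be dispatched as
//   SegmentReduceGPU<float>(ctx, /*nouter=*/6, /*ninner=*/4, /*nsegments=*/4,
//                           functor::Sum(), /*initial_value=*/0.0f,
//                           /*empty_segment_value=*/0.0f, /*is_mean=*/false,
//                           /*is_sqrtn=*/false, input, segment_ids,
//                           /*indices=*/static_cast<const int32*>(nullptr),
//                           /*weights=*/static_cast<const float*>(nullptr),
//                           output);
// Segment 2 has no rows, so its output row is filled with
// empty_segment_value.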
template <typename SegmentId, typename Index, typename Tweights>
__global__ void SegmentWeightsKernel(
SegmentId nsegments, SparseSegmentReductionOperation operation,
const Index* __restrict__ segment_offsets, // [nsegments + 1]
Tweights* __restrict__ weights) { // [nsegments]
GPU_1D_KERNEL_LOOP(i, nsegments) {
Index segment_size = segment_offsets[i + 1] - segment_offsets[i];
segment_size = max(segment_size, Index(1)); // Avoid division by zero
if (operation == SparseSegmentReductionOperation::kMean) {
weights[i] = Tweights(1) / static_cast<Tweights>(segment_size);
} else if (operation == SparseSegmentReductionOperation::kSqrtN) {
weights[i] = Tweights(1) / sqrt(static_cast<Tweights>(segment_size));
}
}
}
template <typename SegmentId, typename Index, typename Tweights>
absl::Status LaunchSegmentWeightsKernel(
const GPUDevice& d, SegmentId nsegments,
SparseSegmentReductionOperation operation,
const Index* segment_offsets, // [nsegments + 1]
Tweights* weights) { // [nsegments]
GpuLaunchConfig config = GetGpuLaunchConfig(
nsegments, d, &SegmentWeightsKernel<SegmentId, Index, Tweights>,
/*dynamic_shared_memory_size=*/0, /*block_size_limit=*/0);
return GpuLaunchKernel(SegmentWeightsKernel<SegmentId, Index, Tweights>,
config.block_count, config.thread_per_block, 0,
d.stream(), nsegments, operation, segment_offsets,
weights);
}
template <typename ReduceOp, typename T>
struct ReduceType {
using type = T;
};
// Sum fp16 values using an fp32 accumulator to avoid numerical issues.
template <>
struct ReduceType<functor::Sum, Eigen::half> {
using type = float;
};
template <>
struct ReduceType<functor::Sum, Eigen::bfloat16> {
using type = float;
};
namespace functor {
template <typename T, typename Index, typename InitialValueF,
typename EmptySegmentValueF, typename ReductionF>
void SegmentReductionFunctor<
T, Index, InitialValueF, EmptySegmentValueF,
ReductionF>::operator()(OpKernelContext* ctx, const GPUDevice& d,
const Index output_rows,
const TensorShape& segment_ids_shape, bool is_mean,
typename TTypes<Index>::ConstFlat segment_ids,
const Index data_size, const T* data,
typename TTypes<T, 2>::Tensor output) {
if (output.size() == 0) {
return;
}
// Launch kernel(s) to compute sorted segment reduction.
// Notes:
// *) 'input_total_size' is the total number of elements to process.
// *) 'segment_ids.shape' is a prefix of data's shape.
// *) 'input_outer_dim_size' is the number of input rows, i.e. the length of
// segment_ids.
const Index input_total_size = data_size;
const Index input_outer_dim_size = segment_ids.dimension(0);
const Index input_inner_dim_size = input_total_size / input_outer_dim_size;
const Index num_segments = output.size() / input_inner_dim_size;
bool use_deterministic_kernels =
UseDeterministicSegmentReductions() ||
(OpDeterminismRequired() && !ReduceOpIsAssociative<ReductionF, T>::value);
// TODO(benbarsdell): If there are no performance concerns with the new
// deterministic kernels, remove this runtime check and the old
// non-deterministic kernels.
if (!use_deterministic_kernels) {
// Set 'output' to initial value.
GpuLaunchConfig config = GetGpuLaunchConfig(output.size(), d);
const T initial_value = InitialValueF()();
TF_CHECK_OK(GpuLaunchKernel(SetToValue<T>, config.block_count,
config.thread_per_block, 0, d.stream(),
output.size(), output.data(), initial_value));
if (data_size == 0 || segment_ids_shape.num_elements() == 0) {
return;
}
const int OuterDimTileSize = 8;
const Index input_outer_dim_num_stripe =
Eigen::divup(input_outer_dim_size, Index(OuterDimTileSize));
const Index total_stripe_count =
input_inner_dim_size * input_outer_dim_num_stripe;
config = GetGpuLaunchConfig(total_stripe_count, d);
TF_CHECK_OK(GpuLaunchKernel(
SortedSegmentReductionCustomKernel<
T, Index, OuterDimTileSize,
typename ReduceUpdateOpFor<ReductionF>::nonatomic_op,
typename ReduceUpdateOpFor<ReductionF>::atomic_op>,
config.block_count, config.thread_per_block, 0, d.stream(),
input_outer_dim_size, input_inner_dim_size, output_rows,
segment_ids.data(), data, output.data(), total_stripe_count,
initial_value));
const T empty_value = EmptySegmentValueF()();
if (is_mean || initial_value != empty_value) {
Tensor segment_offsets;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<Index>::value,
TensorShape({num_segments + 1}),
&segment_offsets));
Index* segment_offsets_ptr = segment_offsets.flat<Index>().data();
OP_REQUIRES_OK(ctx, LaunchSegmentOffsetsKernel(
d, input_outer_dim_size, num_segments,
segment_ids.data(), segment_offsets_ptr));
if (is_mean) {
OP_REQUIRES_OK(ctx, LaunchSegmentMeanNormalizeKernel(
d, num_segments, input_inner_dim_size,
segment_offsets_ptr, output.data()));
}
if (initial_value != empty_value) {
OP_REQUIRES_OK(
ctx, LaunchSegmentSetEmptyKernel(
d, num_segments, input_inner_dim_size, segment_offsets_ptr,
empty_value, output.data()));
}
}
} else {
using Treduce = typename ReduceType<ReductionF, T>::type;
using Tweights = typename RealTypeIfComplex<T>::type;
OP_REQUIRES_OK(
ctx,
SegmentReduceGPU<Treduce>(
ctx, input_outer_dim_size, input_inner_dim_size, num_segments,
ReductionF(), InitialValueF()(), EmptySegmentValueF()(),
/*is_mean=*/is_mean, /*is_sqrtn=*/false, data, segment_ids.data(),
/*indices=*/static_cast<const Index*>(nullptr),
/*weights=*/static_cast<Tweights*>(nullptr), output.data()));
}
}
template <typename T, typename Index, typename InitialValueF,
typename ReductionF>
struct UnsortedSegmentFunctor<GPUDevice, T, Index, InitialValueF, ReductionF> {
void operator()(OpKernelContext* ctx, const TensorShape& segment_ids_shape,
typename TTypes<Index>::ConstFlat unsorted_segment_ids,
typename TTypes<T, 2>::ConstTensor data,
typename TTypes<T, 2>::Tensor output) {
if (output.size() == 0) {
return;
}
bool use_deterministic_kernels =
UseDeterministicSegmentReductions() ||
(!ReduceOpIsAssociative<ReductionF, T>::value &&
OpDeterminismRequired());
bool determinism_requirement_met =
use_deterministic_kernels ||
ReduceOpIsAssociative<ReductionF, T>::value ||
!OpDeterminismRequired() ||
DisableSegmentReductionOpDeterminismExceptions();
OP_REQUIRES(
ctx, determinism_requirement_met,
errors::Unimplemented(
"Deterministic GPU implementation of unsorted segment reduction op"
" not available."));
// Launch kernel(s) to compute unsorted segment reduction.
// Notes:
// *) 'data_size' is the total number of elements to process.
// *) 'segment_ids.shape' is a prefix of data's shape.
// *) 'input_outer_dim_size' is the number of input rows, i.e. the length of
// unsorted_segment_ids.
const Index input_outer_dim_size = unsorted_segment_ids.dimension(0);
const Index input_inner_dim_size = data.dimension(1);
const Index output_outer_dim_size = output.dimension(0);
const Index num_segments = output.size() / input_inner_dim_size;
// TODO(benbarsdell): If there are no performance concerns with the new
// deterministic kernels, remove this runtime check and the old
// non-deterministic kernels.
if (!use_deterministic_kernels) {
// Set 'output' to initial value.
GPUDevice d = ctx->template eigen_device<GPUDevice>();
GpuLaunchConfig config = GetGpuLaunchConfig(output.size(), d);
TF_CHECK_OK(GpuLaunchKernel(
SetToValue<T>, config.block_count, config.thread_per_block, 0,
d.stream(), output.size(), output.data(), InitialValueF()()));
const int64_t data_size = data.size();
if (data_size == 0 || segment_ids_shape.num_elements() == 0) {
return;
}
config = GetGpuLaunchConfig(data_size, d);
TF_CHECK_OK(GpuLaunchKernel(
UnsortedSegmentCustomKernel<
T, Index, typename ReduceUpdateOpFor<ReductionF>::atomic_op>,
config.block_count, config.thread_per_block, 0, d.stream(),
input_outer_dim_size, input_inner_dim_size, output_outer_dim_size,
unsorted_segment_ids.data(), data.data(), output.data()));
} else {
// Allocate temporary space and sort segment_ids, then call the sorted
// implem.
Tensor segment_ids;
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(
DataTypeToEnum<Index>::value,
TensorShape({static_cast<int64_t>(input_outer_dim_size)}),
&segment_ids));
Index* segment_ids_ptr = segment_ids.flat<Index>().data();
Tensor sorted_indices;
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(
DataTypeToEnum<Index>::value,
TensorShape({static_cast<int64_t>(input_outer_dim_size)}),
&sorted_indices));
Index* sorted_indices_ptr = sorted_indices.flat<Index>().data();
// Note: We must sort using all bits here because unsorted_segment_ids
// may contain negative values.
OP_REQUIRES_OK(
ctx, GpuRadixSort(ctx, input_outer_dim_size,
/*keys_in=*/unsorted_segment_ids.data(),
/*keys_out=*/segment_ids_ptr,
/*indices_in=*/static_cast<const Index*>(nullptr),
/*indices_out=*/sorted_indices_ptr));
using Treduce = typename ReduceType<ReductionF, T>::type;
using Tweights = typename RealTypeIfComplex<T>::type;
OP_REQUIRES_OK(
ctx,
SegmentReduceGPU<Treduce>(
ctx, input_outer_dim_size, input_inner_dim_size, num_segments,
ReductionF(), /*initial_value=*/InitialValueF()(),
/*empty_segment_value=*/InitialValueF()(), /*is_mean=*/false,
/*is_sqrtn=*/false, /*input=*/data.data(),
/*segment_ids=*/segment_ids_ptr, /*indices=*/sorted_indices_ptr,
/*weights=*/static_cast<Tweights*>(nullptr), output.data()));
}
}
};
template <typename T, typename Index, typename SegmentId>
absl::Status SparseSegmentReductionFunctor<T, Index, SegmentId>::operator()(
OpKernelContext* context, bool is_mean, bool is_sqrtn, T default_value,
typename TTypes<T, 2>::ConstTensor input,
typename TTypes<Index>::ConstVec indices,
typename TTypes<SegmentId>::ConstVec segment_ids,
typename TTypes<T, 2>::Tensor output) {
using ReduceOp = functor::Sum;
using Treduce = typename ReduceType<ReduceOp, T>::type;
using Tweights = typename RealTypeIfComplex<T>::type;
Index nouter = segment_ids.size();
Index ninner = input.dimension(1);
SegmentId nsegments = output.dimension(0);
return SegmentReduceGPU<Treduce>(
context, /*nouter=*/nouter, /*ninner=*/ninner,
/*nsegments=*/nsegments, /*reduce_op=*/ReduceOp(),
/*initial_value=*/T(0),
/*empty_segment_value=*/default_value,
/*is_mean=*/is_mean, /*is_sqrtn=*/is_sqrtn,
/*input=*/input.data(), /*segment_ids=*/segment_ids.data(),
/*indices=*/indices.data(), /*weights=*/static_cast<Tweights*>(nullptr),
/*output=*/output.data());
}
template <typename T, typename Index, typename SegmentId>
struct SparseSegmentGradFunctor<GPUDevice, T, Index, SegmentId> {
void operator()(OpKernelContext* context,
SparseSegmentReductionOperation operation,
typename TTypes<T>::ConstMatrix input_flat,
typename TTypes<Index>::ConstVec indices_vec,
typename TTypes<SegmentId>::ConstVec segment_vec,
Tensor* output) {
const GPUDevice& device = context->eigen_gpu_device();
auto output_flat = output->flat_outer_dims<T>();
const SegmentId nsegments = input_flat.dimension(0);
const Index ninner = input_flat.dimension(1);
const Index nouter = indices_vec.dimension(0);
const Index noutput = output_flat.dimension(0);
// Allocate and compute segment weights (for Mean/SqrtN operations only).
Tensor weights;
using Tweights = typename RealTypeIfComplex<T>::type;
Tweights* weights_ptr = nullptr;
if (operation != SparseSegmentReductionOperation::kSum) {
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<Tweights>::value,
TensorShape({nsegments}), &weights));
weights_ptr = weights.flat<Tweights>().data();
// Allocate and compute segment_offsets.
Tensor segment_offsets;
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<Index>::value,
TensorShape({nsegments + 1}),
&segment_offsets));
Index* segment_offsets_ptr = segment_offsets.flat<Index>().data();
OP_REQUIRES_OK(context, LaunchSegmentOffsetsKernel(
device, nouter, nsegments, segment_vec.data(),
segment_offsets_ptr));
// Compute the weights based on the segment sizes using segment_offsets.
OP_REQUIRES_OK(context, LaunchSegmentWeightsKernel(
device, nsegments, operation,
segment_offsets_ptr, weights_ptr));
}
const Index* sorted_indices_ptr = indices_vec.data();
const SegmentId* sorted_segment_ptr = segment_vec.data();
Tensor tmp_sorted_indices;
Tensor tmp_sorted_segment;
if (noutput > 1) {
// Sort indices and permute segments.
OP_REQUIRES_OK(context, context->allocate_temp(
DataTypeToEnum<Index>::value,
TensorShape({nouter}), &tmp_sorted_indices));
Index* tmp_sorted_indices_ptr = tmp_sorted_indices.flat<Index>().data();
OP_REQUIRES_OK(context, context->allocate_temp(
DataTypeToEnum<SegmentId>::value,
TensorShape({nouter}), &tmp_sorted_segment));
SegmentId* tmp_sorted_segment_ptr =
tmp_sorted_segment.flat<SegmentId>().data();
OP_REQUIRES_OK(context,
GpuRadixSort(context, nouter,
/*keys_in=*/indices_vec.data(),
/*keys_out=*/tmp_sorted_indices_ptr,
/*indices_in=*/segment_vec.data(),
/*indices_out=*/tmp_sorted_segment_ptr,
/*num_bits=*/Log2Ceiling64(noutput)));
sorted_indices_ptr = tmp_sorted_indices_ptr;
sorted_segment_ptr = tmp_sorted_segment_ptr;
}
// Compute the gradient using a weighted SegmentReduceGPU with the segment
// IDs and indices swapped.
using ReduceOp = functor::Sum;
using Treduce = typename ReduceType<ReduceOp, T>::type;
OP_REQUIRES_OK(
context,
SegmentReduceGPU<Treduce>(
context, /*nouter=*/static_cast<SegmentId>(nouter),
/*ninner=*/static_cast<SegmentId>(ninner),
/*nsegments=*/noutput,
/*reduce_op=*/ReduceOp(),
/*initial_value=*/T(0),
/*empty_segment_value=*/T(0),
/*is_mean=*/false, /*is_sqrtn=*/false,
/*input=*/input_flat.data(), /*segment_ids=*/sorted_indices_ptr,
/*indices=*/sorted_segment_ptr, /*weights=*/weights_ptr,
/*output=*/output_flat.data()));
}
};
template <typename TindicesCompact>
struct EdgeIndicatorFunctor {
EdgeIndicatorFunctor(const TindicesCompact* sorted_indices)
: sorted_indices_(sorted_indices) {}
template <typename Idx>
__device__ bool operator()(Idx idx) const {
return idx == 0 ? false : sorted_indices_[idx] != sorted_indices_[idx - 1];
}
private:
const TindicesCompact* __restrict__ sorted_indices_;
};
template <typename Toffsets, typename EdgeIndicatorIter,
typename TindicesCompact, typename Tindices>
__global__ void ScatterUniqueIndicesKernel(
Toffsets nouter,
EdgeIndicatorIter sorted_indices_edge_indicator, // [nouter]
const TindicesCompact* __restrict__ sorted_indices, // [nouter]
const Toffsets* __restrict__ sorted_indices_ids, // [nouter]
Tindices* __restrict__ sorted_unique_indices) { // [num_unique]
for (int i : GpuGridRangeX(nouter)) {
if (i == 0 || sorted_indices_edge_indicator[i]) {
sorted_unique_indices[sorted_indices_ids[i]] =
static_cast<Tindices>(sorted_indices[i]);
}
}
}
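// Illustrative example (values chosen for exposition): for
// sorted_indices = [1, 1, 4, 4, 4, 7], EdgeIndicatorFunctor yields
// [0, 0, 1, 0, 0, 1]; its inclusive prefix sum (computed via
// GpuInclusivePrefixSum in SparseSegmentGradV2Functor below) gives
// sorted_indices_ids = [0, 0, 1, 1, 1, 2], so num_unique = 2 + 1 = 3 and
// this kernel scatters sorted_unique_indices = [1, 4, 7].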
template <typename Toffsets, typename EdgeIndicatorIter,
typename TindicesCompact, typename Tindices>
absl::Status LaunchScatterUniqueIndicesKernel(
const GPUDevice& d, Toffsets nouter,
EdgeIndicatorIter sorted_indices_edge_indicator, // [nouter]
const TindicesCompact* __restrict__ sorted_indices, // [nouter]
const Toffsets* __restrict__ sorted_indices_ids, // [nouter]
Tindices* __restrict__ sorted_unique_indices) { // [num_unique]
GpuLaunchConfig config = GetGpuLaunchConfig(
nouter, d,
&ScatterUniqueIndicesKernel<Toffsets, EdgeIndicatorIter, TindicesCompact,
Tindices>,
/*dynamic_shared_memory_size=*/0, /*block_size_limit=*/0);
return GpuLaunchKernel(ScatterUniqueIndicesKernel<Toffsets, EdgeIndicatorIter,
TindicesCompact, Tindices>,
config.block_count, config.thread_per_block, 0,
d.stream(), nouter, sorted_indices_edge_indicator,
sorted_indices, sorted_indices_ids,
sorted_unique_indices);
}
template <typename T, typename Tindices, typename Tsegmentids>
struct SparseSegmentGradV2Functor<GPUDevice, T, Tindices, Tsegmentids> {
void operator()(OpKernelContext* context,
SparseSegmentReductionOperation operation,
typename TTypes<T>::ConstMatrix input_flat,
typename TTypes<Tindices>::ConstVec indices_vec,
typename TTypes<Tsegmentids>::ConstVec segment_vec,
const TensorShape& dense_output_shape,
typename AsyncOpKernel::DoneCallback done) {
const GPUDevice& device = context->eigen_gpu_device();
const int64_t nsegments = input_flat.dimension(0);
const int64_t ninner64 = input_flat.dimension(1);
const int64_t nouter64 = indices_vec.dimension(0);
// Note: nouter and ninner are not expected to be huge, so we use int32 to
// save memory bandwidth.
using Toffsets = int32_t;
OP_REQUIRES_ASYNC(context, nouter64 <= std::numeric_limits<Toffsets>::max(),
absl::InvalidArgumentError(
absl::StrCat("Indices vector of length ", nouter64,
" is too large to fit in int32.")),
done);
const Toffsets nouter = static_cast<Toffsets>(nouter64);
OP_REQUIRES_ASYNC(context, ninner64 <= std::numeric_limits<Toffsets>::max(),
absl::InvalidArgumentError(absl::StrCat(
"Inner data dimension of size ", ninner64,
" is too large to fit in int32.")),
done);
const Toffsets ninner = static_cast<Toffsets>(ninner64);
// Cast indices to 32-bit to save memory bandwidth (the cost of the cast is
// worth it because the vector is used multiple times).
// Note that we can currently assume int32 is safe because the op's dense
// output_dim0 input is always int32.
using TindicesCompact = int32_t;
Tensor tmp_indices_internal;
const TindicesCompact* indices_internal_ptr;
if constexpr (std::is_same<Tindices, TindicesCompact>::value) {
indices_internal_ptr = indices_vec.data();
} else {
OP_REQUIRES_OK_ASYNC(
context,
context->allocate_temp(DataTypeToEnum<TindicesCompact>::value,
TensorShape({nouter}), &tmp_indices_internal),
done);
auto indices_vec_internal = tmp_indices_internal.flat<TindicesCompact>();
indices_vec_internal.device(device) =
indices_vec.template cast<TindicesCompact>();
indices_internal_ptr = indices_vec_internal.data();
}
// Cast segment IDs to smallest possible type to save memory bandwidth.
if (nsegments <= std::numeric_limits<int16_t>::max()) {
CastSegmentIdsThenImpl<Toffsets, TindicesCompact, int16_t>(
context, operation, nouter, ninner, nsegments, input_flat.data(),
tmp_indices_internal, indices_internal_ptr, segment_vec,
dense_output_shape, done);
} else if (sizeof(Tsegmentids) > sizeof(int32_t) &&
nsegments <= std::numeric_limits<int32_t>::max()) {
CastSegmentIdsThenImpl<Toffsets, TindicesCompact, int32_t>(
context, operation, nouter, ninner, nsegments, input_flat.data(),
tmp_indices_internal, indices_internal_ptr, segment_vec,
dense_output_shape, done);
} else {
Impl<Toffsets, TindicesCompact, Tsegmentids>(
context, operation, nouter, ninner, nsegments, input_flat.data(),
tmp_indices_internal, indices_internal_ptr, Tensor(),
segment_vec.data(), dense_output_shape, done);
}
}
private:
using Tweights = typename RealTypeIfComplex<T>::type;
template <typename Toffsets, typename TindicesCompact,
typename Tsegmentids_internal>
void CastSegmentIdsThenImpl(
OpKernelContext* context, SparseSegmentReductionOperation operation,
Toffsets nouter, Toffsets ninner, Tsegmentids_internal nsegments,
const T* input, Tensor indices_tensor, const TindicesCompact* indices,
typename TTypes<Tsegmentids>::ConstVec segment_vec,
const TensorShape& dense_output_shape,
typename AsyncOpKernel::DoneCallback done) {
const GPUDevice& device = context->eigen_gpu_device();
Tensor tmp_segment_internal;
OP_REQUIRES_OK_ASYNC(
context,
context->allocate_temp(DataTypeToEnum<Tsegmentids_internal>::value,
TensorShape({nouter}), &tmp_segment_internal),
done);
auto segment_vec_internal =
tmp_segment_internal.flat<Tsegmentids_internal>();
segment_vec_internal.device(device) =
segment_vec.template cast<Tsegmentids_internal>();
Impl<Toffsets, TindicesCompact, Tsegmentids_internal>(
context, operation, nouter, ninner, nsegments, input, indices_tensor,
indices, tmp_segment_internal, segment_vec_internal.data(),
dense_output_shape, done);
}
template <typename Toffsets, typename TindicesCompact,
typename Tsegmentids_internal>
void Impl(OpKernelContext* context, SparseSegmentReductionOperation operation,
Toffsets nouter, Toffsets ninner, Tsegmentids_internal nsegments,
const T* input, Tensor indices_tensor,
const TindicesCompact* indices, Tensor segment_ids_tensor,
const Tsegmentids_internal* segment_ids,
const TensorShape& dense_output_shape,
typename AsyncOpKernel::DoneCallback done) {
const int64_t dense_output_dim0 = dense_output_shape.dim_size(0);
// Allocate and compute segment weights (for Mean/SqrtN operations only).
Tensor tmp_weights;
Tweights* weights_ptr = nullptr;
if (operation != SparseSegmentReductionOperation::kSum) {
ComputeSegmentWeights(context, operation, nsegments, nouter, segment_ids,
&tmp_weights, done);
weights_ptr = tmp_weights.flat<Tweights>().data();
}
const TindicesCompact* sorted_indices_ptr = indices;
const Tsegmentids_internal* permuted_segment_ptr = segment_ids;
Tensor tmp_sorted_indices;
Tensor tmp_permuted_segment;
if (dense_output_dim0 > 1) {
// Sort indices and permute segments.
OP_REQUIRES_OK_ASYNC(
context,
context->allocate_temp(DataTypeToEnum<TindicesCompact>::value,
TensorShape({nouter}), &tmp_sorted_indices),
done);
TindicesCompact* tmp_sorted_indices_ptr =
tmp_sorted_indices.flat<TindicesCompact>().data();
OP_REQUIRES_OK_ASYNC(
context,
context->allocate_temp(DataTypeToEnum<Tsegmentids_internal>::value,
TensorShape({nouter}), &tmp_permuted_segment),
done);
Tsegmentids_internal* tmp_permuted_segment_ptr =
tmp_permuted_segment.flat<Tsegmentids_internal>().data();
OP_REQUIRES_OK_ASYNC(
context,
GpuRadixSort(context, nouter,
/*keys_in=*/indices,
/*keys_out=*/tmp_sorted_indices_ptr,
/*indices_in=*/segment_ids,
/*indices_out=*/tmp_permuted_segment_ptr,
/*num_bits=*/Log2Ceiling64(dense_output_dim0)),
done);
sorted_indices_ptr = tmp_sorted_indices_ptr;
permuted_segment_ptr = tmp_permuted_segment_ptr;
// The original tensors are no longer needed.
indices_tensor = Tensor();
indices = nullptr;
segment_ids_tensor = Tensor();
segment_ids = nullptr;
}
using CountIter = gpuprim::CountingInputIterator<Toffsets>;
using EdgeIndicatorIter = gpuprim::TransformInputIterator<
Toffsets, EdgeIndicatorFunctor<TindicesCompact>, CountIter>;
EdgeIndicatorIter sorted_indices_edge_indicator(
CountIter(0),
EdgeIndicatorFunctor<TindicesCompact>(sorted_indices_ptr));
Tensor tmp_sorted_indices_unique_ids;
OP_REQUIRES_OK_ASYNC(context,
context->allocate_temp(DataTypeToEnum<Toffsets>::value,
TensorShape({nouter}),
&tmp_sorted_indices_unique_ids),
done);
Toffsets* sorted_indices_unique_ids_ptr =
tmp_sorted_indices_unique_ids.flat<Toffsets>().data();
OP_REQUIRES_OK_ASYNC(
context,
GpuInclusivePrefixSum(context, nouter, sorted_indices_edge_indicator,
sorted_indices_unique_ids_ptr),
done);
se::Stream* stream = context->op_device_context()->stream();
OP_REQUIRES_ASYNC(context, stream,
absl::InternalError("No GPU stream available."), done);
// Copy the last element of sorted_indices_unique_ids back to the host to
// obtain num_unique.
ScratchSpace<Toffsets> last_idx_host(context, 1, /*on_host=*/true);
OP_REQUIRES_OK_ASYNC(
context,
stream->Memcpy(
last_idx_host.mutable_data(),
stream_executor::DeviceAddressBase(
const_cast<Toffsets*>(sorted_indices_unique_ids_ptr) +
(nouter - 1),
sizeof(*last_idx_host.data())),
sizeof(*last_idx_host.data())),
done);
auto async_finish_computation =
[this, context, dense_output_shape, nouter, ninner, input,
indices_tensor, tmp_sorted_indices, sorted_indices_ptr,
tmp_sorted_indices_unique_ids, sorted_indices_unique_ids_ptr,
segment_ids_tensor, tmp_permuted_segment, permuted_segment_ptr,
sorted_indices_edge_indicator, tmp_weights, weights_ptr, last_idx_host,
done]() -> void {
const GPUDevice& device = context->eigen_gpu_device();
Toffsets num_unique = (*last_idx_host.data()) + 1;
std::unique_ptr<se::ActivateContext> scoped_activation =
context->op_device_context()->stream()->parent()->Activate();
TensorShape output_shape = dense_output_shape;
OP_REQUIRES_OK_ASYNC(context,
output_shape.SetDimWithStatus(0, num_unique), done);
Tensor* output = nullptr;
T* output_ptr;
OP_REQUIRES_OK_ASYNC(
context, context->allocate_output(0, output_shape, &output), done);
output_ptr = output->flat<T>().data();
// Compute the gradient using a weighted SegmentReduceGPU with the segment
// IDs and indices swapped.
using ReduceOp = functor::Sum;
using Treduce = typename ReduceType<ReduceOp, T>::type;
OP_REQUIRES_OK_ASYNC(context,
SegmentReduceGPU<Treduce>(
context, /*nouter=*/nouter,
/*ninner=*/ninner,
/*nsegments=*/num_unique,
/*reduce_op=*/ReduceOp(),
/*initial_value=*/T(0),
/*empty_segment_value=*/T(0),
/*is_mean=*/false, /*is_sqrtn=*/false,
/*input=*/input,
/*segment_ids=*/sorted_indices_unique_ids_ptr,
/*indices=*/permuted_segment_ptr,
/*weights=*/weights_ptr,
/*output=*/output_ptr),
done);
Tensor* sorted_unique_indices = nullptr;
Tindices* sorted_unique_indices_ptr;
OP_REQUIRES_OK_ASYNC(
context,
context->allocate_output(1, TensorShape({num_unique}),
&sorted_unique_indices),
done);
sorted_unique_indices_ptr =
sorted_unique_indices->flat<Tindices>().data();
OP_REQUIRES_OK_ASYNC(
context,
LaunchScatterUniqueIndicesKernel(
device, nouter, sorted_indices_edge_indicator, sorted_indices_ptr,
sorted_indices_unique_ids_ptr, sorted_unique_indices_ptr),
done);
done();
};
context->device()
->tensorflow_accelerator_device_info()
->event_mgr->ThenExecute(stream, async_finish_computation);
}
template <typename Tsegmentids_internal, typename Toffsets>
void ComputeSegmentWeights(OpKernelContext* context,
SparseSegmentReductionOperation operation,
Tsegmentids_internal nsegments, Toffsets nouter,
const Tsegmentids_internal* segment_ids,
Tensor* tmp_weights,
typename AsyncOpKernel::DoneCallback done) {
const GPUDevice& device = context->eigen_gpu_device();
OP_REQUIRES_OK_ASYNC(
context,
context->allocate_temp(DataTypeToEnum<Tweights>::value,
TensorShape({nsegments}), tmp_weights),
done);
Tweights* weights_ptr = tmp_weights->flat<Tweights>().data();
// Allocate and compute segment_offsets.
Tensor tmp_segment_offsets;
OP_REQUIRES_OK_ASYNC(context,
context->allocate_temp(DataTypeToEnum<Toffsets>::value,
TensorShape({nsegments + 1}),
&tmp_segment_offsets),
done);
Toffsets* segment_offsets_ptr = tmp_segment_offsets.flat<Toffsets>().data();
OP_REQUIRES_OK_ASYNC(
context,
LaunchSegmentOffsetsKernel(device, nouter, nsegments, segment_ids,
segment_offsets_ptr),
done);
// Compute the weights based on the segment sizes using segment_offsets.
OP_REQUIRES_OK_ASYNC(
context,
LaunchSegmentWeightsKernel(device, nsegments, operation,
segment_offsets_ptr, weights_ptr),
done);
}
};
} // namespace functor
} // namespace tensorflow
#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#endif // TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_GPU_CU_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/core/kernels/segment_reduction_ops_gpu.cu.h
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
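# Illustrative note (added annotation, not part of the original gyp module):
# memoize can wrap any function whose positional arguments are hashable; the
# names slow_lookup and expensive_computation below are hypothetical, not gyp
# APIs.
#
#   @memoize
#   def slow_lookup(key):
#     return expensive_computation(key)
#
# Repeated calls with the same argument are answered from self.cache instead
# of re-running the function; unhashable arguments raise TypeError when used
# as a dict key, which is the "blow up" mentioned above.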
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
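# Illustrative note (added annotation, not part of the original gyp module):
# for example, ParseQualifiedTarget('foo/bar.gyp:mytarget#host') returns
# ['foo/bar.gyp', 'mytarget', 'host'], while ParseQualifiedTarget('mytarget')
# returns [None, 'mytarget', None]; the file, target and toolset names here
# are made up for illustration.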
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
      # Further (to handle cases like ../cwd), make it relative to cwd
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
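# Illustrative note (added annotation, not part of the original gyp module):
# GetEnvironFallback(('CC_target', 'CC'), 'cc') returns $CC_target if set,
# otherwise $CC if set, otherwise the literal default 'cc'.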
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
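# Illustrative note (added annotation, not part of the original gyp module):
# QualifiedTarget('foo/bar.gyp', 'mytarget', 'host') returns
# 'foo/bar.gyp:mytarget#host', and the '#toolset' suffix is omitted when
# toolset is empty; the names here are made up for illustration.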
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
  # If |follow_path_symlink| is true (default) and |path| is a symlink, then
# this method returns a path to the real file represented by |path|. If it is
# false, this method returns a path to the symlink. If |path| is not a
# symlink, this option has no effect.
# Convert to normalized (and therefore absolute paths).
if follow_path_symlink:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
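# Illustrative note (added annotation, not part of the original gyp module):
# with both arguments relative to the current directory,
# RelativePath('out/Debug/obj', 'out') is typically 'Debug/obj', and
# RelativePath('out', 'out/Debug/obj') is os.path.join('..', '..'); symlinks
# are resolved first, so trees containing links can give different results.
# The paths here are made up for illustration.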
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
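# Illustrative note (added annotation, not part of the original gyp module):
#   EncodePOSIXShellArgument('hello world')  ->  '"hello world"'
#   EncodePOSIXShellArgument('')             ->  '""'
#   EncodePOSIXShellArgument('plain')        ->  'plain'
# i.e. arguments containing shell-significant characters (or nothing at all)
# are double-quoted, and ", \ and ` are backslash-escaped in either case.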
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('netbsd'):
return 'netbsd'
if sys.platform.startswith('aix'):
return 'aix'
if sys.platform.startswith('zos'):
return 'zos'
if sys.platform.startswith('os390'):
return 'zos'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
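# Illustrative note (added annotation, not part of the original gyp module):
# uniquer preserves first-seen order, e.g. uniquer([3, 1, 3, 2, 1]) returns
# [3, 1, 2], and uniquer(['a', 'A', 'b'], idfun=str.lower) returns ['a', 'b'].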
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
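# Illustrative note (added annotation, not part of the original gyp module):
# OrderedSet behaves like a set that remembers insertion order, e.g.
#   s = OrderedSet(['b', 'a', 'b', 'c'])
#   list(s)            ->  ['b', 'a', 'c']
#   s.pop()            ->  'c'   (last-inserted by default)
#   s.pop(last=False)  ->  'b'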
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]+)\)', graph[node])
    print TopologicallySorted(graph.keys(), GetEdges)
    ==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# -*- python-indent-offset: 4; -*-
import pandas as pd
import numpy as np
import sys
import os
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.contribution_builder as cb
from pmag_env import set_env
def save_redo(SpecRecs, inspec):
print("Saving changes to specimen file")
pmag.magic_write(inspec, SpecRecs, 'specimens')
def main():
"""
NAME
zeq_magic.py
DESCRIPTION
reads in magic_measurements formatted file, makes plots of remanence decay
during demagnetization experiments. Reads in prior interpretations saved in
a pmag_specimens formatted file [and allows re-interpretations of best-fit lines
and planes and saves (revised or new) interpretations in a pmag_specimens file.
        interpretations are saved in the coordinate system used. Also allows judicious editing of
        measurements to eliminate "bad" measurements. These are marked as such in the magic_measurements
        input file. They are NOT deleted, just ignored. ] Bracketed part not yet implemented
SYNTAX
zeq_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f MEASFILE: sets measurements format input file, default: measurements.txt
        -fsp SPECFILE: sets specimens format file with prior interpretations, default: specimens.txt
-fsa SAMPFILE: sets samples format file sample=>site information, default: samples.txt
        -fsi SITEFILE: sets sites format file with site=>location information, default: sites.txt
-Fp PLTFILE: sets filename for saved plot, default is name_type.fmt (where type is zijd, eqarea or decay curve)
-crd [s,g,t]: sets coordinate system, g=geographic, t=tilt adjusted, default: specimen coordinate system
-spc SPEC plots single specimen SPEC, saves plot with specified format
with optional -dir settings and quits
-dir [L,P,F][beg][end]: sets calculation type for principal component analysis, default is none
beg: starting step for PCA calculation
end: ending step for PCA calculation
[L,P,F]: calculation type for line, plane or fisher mean
must be used with -spc option
-fmt FMT: set format of saved plot [png,svg,jpg]
-A: suppresses averaging of replicate measurements, default is to average
-sav: saves all plots without review
SCREEN OUTPUT:
Specimen, N, a95, StepMin, StepMax, Dec, Inc, calculation type
"""
# initialize some variables
doave, e, b = 1, 0, 0 # average replicates, initial end and beginning step
intlist = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude']
plots, coord = 0, 's'
noorient = 0
version_num = pmag.get_version()
verbose = pmagplotlib.verbose
calculation_type, fmt = "", "svg"
spec_keys = []
geo, tilt, ask = 0, 0, 0
PriorRecs = [] # empty list for prior interpretations
backup = 0
specimen = "" # can skip everything and just plot one specimen with bounds e,b
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", default_val=os.getcwd())
meas_file = pmag.get_named_arg(
"-f", default_val="measurements.txt")
spec_file = pmag.get_named_arg(
"-fsp", default_val="specimens.txt")
samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt")
site_file = pmag.get_named_arg("-fsi", default_val="sites.txt")
#meas_file = os.path.join(dir_path, meas_file)
#spec_file = os.path.join(dir_path, spec_file)
#samp_file = os.path.join(dir_path, samp_file)
#site_file = os.path.join(dir_path, site_file)
plot_file = pmag.get_named_arg("-Fp", default_val="")
crd = pmag.get_named_arg("-crd", default_val="s")
if crd == "s":
coord = "-1"
elif crd == "t":
coord = "100"
else:
coord = "0"
saved_coord = coord
fmt = pmag.get_named_arg("-fmt", "svg")
specimen = pmag.get_named_arg("-spc", default_val="")
#if specimen: # just save plot and exit
# plots, verbose = 1, 0
beg_pca, end_pca = "", ""
if '-dir' in sys.argv:
ind = sys.argv.index('-dir')
direction_type = sys.argv[ind + 1]
beg_pca = int(sys.argv[ind + 2])
end_pca = int(sys.argv[ind + 3])
if direction_type == 'L':
calculation_type = 'DE-BFL'
if direction_type == 'P':
calculation_type = 'DE-BFP'
if direction_type == 'F':
calculation_type = 'DE-FM'
if '-A' in sys.argv:
doave = 0
if '-sav' in sys.argv:
plots, verbose = 1, 0
#
first_save = 1
fnames = {'measurements': meas_file, 'specimens': spec_file,
'samples': samp_file, 'sites': site_file}
contribution = cb.Contribution(dir_path, custom_filenames=fnames, read_tables=[
'measurements', 'specimens', 'samples', 'sites'])
#
# import specimens
if 'measurements' not in contribution.tables:
print('-W- No measurements table found in your working directory')
return
specimen_cols = ['analysts', 'aniso_ftest', 'aniso_ftest12', 'aniso_ftest23', 'aniso_s', 'aniso_s_mean', 'aniso_s_n_measurements', 'aniso_s_sigma', 'aniso_s_unit', 'aniso_tilt_correction', 'aniso_type', 'aniso_v1', 'aniso_v2', 'aniso_v3', 'citations', 'description', 'dir_alpha95', 'dir_comp', 'dir_dec', 'dir_inc', 'dir_mad_free', 'dir_n_measurements', 'dir_tilt_correction', 'experiments', 'geologic_classes',
'geologic_types', 'hyst_bc', 'hyst_bcr', 'hyst_mr_moment', 'hyst_ms_moment', 'int_abs', 'int_b', 'int_b_beta', 'int_b_sigma', 'int_corr', 'int_dang', 'int_drats', 'int_f', 'int_fvds', 'int_gamma', 'int_mad_free', 'int_md', 'int_n_measurements', 'int_n_ptrm', 'int_q', 'int_rsc', 'int_treat_dc_field', 'lithologies', 'meas_step_max', 'meas_step_min', 'meas_step_unit', 'method_codes', 'sample', 'software_packages', 'specimen']
if 'specimens' in contribution.tables:
contribution.propagate_name_down('sample','measurements')
# add location/site info to measurements table for naming plots
if pmagplotlib.isServer:
contribution.propagate_name_down('site', 'measurements')
contribution.propagate_name_down('location', 'measurements')
spec_container = contribution.tables['specimens']
if 'method_codes' not in spec_container.df.columns:
spec_container.df['method_codes'] = None
prior_spec_data = spec_container.get_records_for_code(
'LP-DIR', strict_match=False) # look up all prior directional interpretations
#
# tie sample names to measurement data
#
else:
spec_container, prior_spec_data = None, []
#
# import samples for orientation info
#
if 'samples' in contribution.tables:
samp_container=contribution.tables['samples']
samps=samp_container.df
samp_data=samps.to_dict('records')# convert to list of dictionaries for use with get_orient
else:
samp_data=[]
#if ('samples' in contribution.tables) and ('specimens' in contribution.tables):
# # contribution.propagate_name_down('site','measurements')
# contribution.propagate_cols(col_names=[
# 'azimuth', 'dip', 'orientation_quality','bed_dip','bed_dip_direction'], target_df_name='measurements', source_df_name='samples')
##
# define figure numbers for equal area, zijderveld,
    # and intensity vs. demagnetization step respectively
#
ZED = {}
ZED['eqarea'], ZED['zijd'], ZED['demag'] = 1, 2, 3
pmagplotlib.plot_init(ZED['eqarea'], 6, 6)
pmagplotlib.plot_init(ZED['zijd'], 6, 6)
pmagplotlib.plot_init(ZED['demag'], 6, 6)
# save_pca=0
angle, direction_type, setangle = "", "", 0
# create measurement dataframe
#
meas_container = contribution.tables['measurements']
meas_data = meas_container.df
#
meas_data = meas_data[meas_data['method_codes'].str.contains(
'LT-NO|LT-AF-Z|LT-T-Z|LT-M-Z') == True] # fish out steps for plotting
meas_data = meas_data[meas_data['method_codes'].str.contains(
'AN|ARM|LP-TRM|LP-PI-ARM') == False] # strip out unwanted experiments
intensity_types = [
col_name for col_name in meas_data.columns if col_name in intlist]
intensity_types = [
col_name for col_name in intensity_types if any(meas_data[col_name])]
if not len(intensity_types):
print('-W- No intensity columns found')
return
# plot non-empty intensity method found - normalized to initial value anyway -
# doesn't matter which used
int_key = cb.get_intensity_col(meas_data)
# get all the non-null intensity records of the same type
meas_data = meas_data[meas_data[int_key].notnull()]
if 'quality' not in meas_data.columns:
meas_data['quality'] = 'g' # set the default flag to good
# need to treat LP-NO specially for af data, treatment should be zero,
# otherwise 273.
#meas_data['treatment'] = meas_data['treat_ac_field'].where(
# cond=meas_data['treat_ac_field'] != 0, other=meas_data['treat_temp'])
meas_data['treatment'] = meas_data['treat_ac_field'].where(
cond=meas_data['treat_ac_field'].astype(bool), other=meas_data['treat_temp'])
meas_data['ZI'] = 1 # initialize these to one
meas_data['instrument_codes'] = "" # initialize these to blank
# for unusual case of microwave power....
if 'treat_mw_power' in meas_data.columns:
meas_data.loc[
(meas_data.treat_mw_power != 0) &
(meas_data.treat_mw_power) &
(meas_data.treat_mw_time),
'treatment'] = meas_data.treat_mw_power * meas_data.treat_mw_time
#
# get list of unique specimen names from measurement data
#
# this is a list of all the specimen names
specimen_names = meas_data.specimen.unique()
specimen_names = specimen_names.tolist()
specimen_names.sort()
#
# set up new DataFrame for this sessions specimen interpretations
#
data_container = cb.MagicDataFrame(
dtype='specimens', columns=specimen_cols)
# this is for interpretations from this session
current_spec_data = data_container.df
if specimen == "":
k = 0
else:
k = specimen_names.index(specimen)
# let's look at the data now
while k < len(specimen_names):
mpars={"specimen_direction_type": "Error"}
# set the current specimen for plotting
this_specimen = str(specimen_names[k])
# reset beginning/end pca if plotting more than one specimen
if not specimen:
beg_pca, end_pca = "", ""
if verbose and this_specimen != "":
print(this_specimen, k + 1, 'out of ', len(specimen_names))
if setangle == 0:
angle = ""
this_specimen_measurements = meas_data[meas_data['specimen'].astype(str).str.contains(
this_specimen).astype(bool)] # fish out this specimen
this_specimen_measurements = this_specimen_measurements[-this_specimen_measurements['quality'].str.contains(
'b').astype(bool)] # remove bad measurements
if len(this_specimen_measurements) != 0: # if there are measurements
meas_list=this_specimen_measurements.to_dict('records') # get a list of dictionaries
this_sample=""
if coord != '-1' and 'sample' in meas_list[0].keys(): # look up sample name
this_sample=pmag.get_dictitem(meas_list,'specimen',this_specimen,'T')
if len(this_sample)>0:
this_sample=this_sample[0]['sample']
#
# set up datablock [[treatment,dec, inc, int, direction_type],[....]]
#
#
# figure out the method codes
#
units, methods, title = "", "", this_specimen
if pmagplotlib.isServer:
try:
loc = this_specimen_measurements.loc[:, 'location'].values[0]
except:
loc = ""
try:
site = this_specimen_measurements.loc[:, 'site'].values[0]
except:
site = ""
try:
samp = this_specimen_measurements.loc[:, 'sample'].values[0]
except:
samp = ""
title = "LO:_{}_SI:_{}_SA:_{}_SP:_{}_".format(loc, site, samp, this_specimen)
# this is a list of all the specimen method codes
meas_meths = this_specimen_measurements.method_codes.unique()
tr = pd.to_numeric(this_specimen_measurements.treatment).tolist()
if any(cb.is_null(treat, False) for treat in tr):
print('-W- Missing required values in measurements.treatment for {}, skipping'.format(this_specimen))
if specimen:
return
k += 1
continue
if set(tr) == set([0]):
print('-W- Missing required values in measurements.treatment for {}, skipping'.format(this_specimen))
if specimen:
return
k += 1
continue
for m in meas_meths:
if 'LT-AF-Z' in m and 'T' not in units:
units = 'T' # units include tesla
tr[0] = 0
if 'LT-T-Z' in m and 'K' not in units:
units = units + ":K" # units include kelvin
if 'LT-M-Z' in m and 'J' not in units:
units = units + ':J' # units include joules
tr[0] = 0
units = units.strip(':') # strip off extra colons
if 'LP-' in m:
methods = methods + ":" + m
decs = pd.to_numeric(this_specimen_measurements.dir_dec).tolist()
incs = pd.to_numeric(this_specimen_measurements.dir_inc).tolist()
#
# fix the coordinate system
#
# revert to original coordinate system
coord = saved_coord
if coord != '-1': # need to transform coordinates to geographic
# get the azimuth
or_info,az_type=pmag.get_orient(samp_data,this_sample,data_model=3)
if 'azimuth' in or_info.keys() and cb.not_null(or_info['azimuth'], False):
#azimuths = pd.to_numeric(
# this_specimen_measurements.azimuth).tolist()
#dips = pd.to_numeric(this_specimen_measurements.dip).tolist()
azimuths=len(decs)*[or_info['azimuth']]
dips=len(decs)*[or_info['dip']]
# if azimuth/dip is missing, plot using specimen coordinates instead
else:
azimuths,dips=[],[]
if any([cb.is_null(az) for az in azimuths if az != 0]):
coord = '-1'
print("-W- Couldn't find azimuth and dip for {}".format(this_specimen))
print(" Plotting with specimen coordinates instead")
elif any([cb.is_null(dip) for dip in dips if dip != 0]):
coord = '-1'
print("-W- Couldn't find azimuth and dip for {}".format(this_specimen))
print(" Plotting with specimen coordinates instead")
else:
coord = saved_coord
# if azimuth and dip were found, continue with geographic coordinates
if coord != "-1" and len(azimuths)>0:
dirs = [decs, incs, azimuths, dips]
# this transposes the columns and rows of the list of lists
dirs_geo = np.array(list(map(list, list(zip(*dirs)))))
decs, incs = pmag.dogeo_V(dirs_geo)
if coord == '100' and 'bed_dip_direction' in or_info.keys() and or_info['bed_dip_direction']!="": # need to do tilt correction too
bed_dip_dirs=len(decs)*[or_info['bed_dip_direction']]
bed_dips=len(decs)*[or_info['bed_dip']]
#bed_dip_dirs = pd.to_numeric(
# this_specimen_measurements.bed_dip_direction).tolist() # get the azimuths
#bed_dips = pd.to_numeric(
# this_specimen_measurements.bed_dip).tolist() # get the azimuths
dirs = [decs, incs, bed_dip_dirs, bed_dips]
## this transposes the columns and rows of the list of lists
dirs_tilt = np.array(list(map(list, list(zip(*dirs)))))
decs, incs = pmag.dotilt_V(dirs_tilt)
if pmagplotlib.isServer:
title = title + "CO:_t_"
else:
title = title + '_t'
else:
if pmagplotlib.isServer:
title = title + "CO:_g_"
else:
title = title + '_g'
if angle == "":
angle = decs[0]
ints = pd.to_numeric(this_specimen_measurements[int_key]).tolist()
ZI = this_specimen_measurements.ZI.tolist()
flags = this_specimen_measurements.quality.tolist()
codes = this_specimen_measurements.instrument_codes.tolist()
datalist = [tr, decs, incs, ints, ZI, flags, codes]
# this transposes the columns and rows of the list of lists
datablock = list(map(list, list(zip(*datalist))))
pmagplotlib.plot_zed(ZED, datablock, angle, title, units)
if verbose and not set_env.IS_WIN:
pmagplotlib.draw_figs(ZED)
#
# collect info for current_specimen_interpretation dictionary
#
#
# find prior interpretation
#
prior_specimen_interpretations=[]
if len(prior_spec_data):
prior_specimen_interpretations = prior_spec_data[prior_spec_data['specimen'].astype(str).str.contains(this_specimen) == True]
if (beg_pca == "") and (len(prior_specimen_interpretations) != 0):
if len(prior_specimen_interpretations)>0:
beg_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_min.values).tolist()
end_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_max.values).tolist()
spec_methods = prior_specimen_interpretations.method_codes.tolist()
# step through all prior interpretations and plot them
for ind in range(len(beg_pcas)):
spec_meths = spec_methods[ind].split(':')
for m in spec_meths:
if 'DE-BFL' in m:
calculation_type = 'DE-BFL' # best fit line
if 'DE-BFP' in m:
calculation_type = 'DE-BFP' # best fit plane
if 'DE-FM' in m:
calculation_type = 'DE-FM' # fisher mean
if 'DE-BFL-A' in m:
calculation_type = 'DE-BFL-A' # anchored best fit line
if len(beg_pcas)!=0:
try:
# getting the starting and ending points
start, end = tr.index(beg_pcas[ind]), tr.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError:
print('-W- Specimen record contains invalid start/stop bounds:')
mpars['specimen_direction_type'] = "Error"
# calculate direction/plane
if mpars["specimen_direction_type"] != "Error":
# put it on the plot
pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
if verbose and not set_env.IS_WIN:
pmagplotlib.draw_figs(ZED)
### SKIP if no prior interpretation - this section should not be used:
# else:
# try:
# start, end = int(beg_pca), int(end_pca)
# except ValueError:
# beg_pca = 0
# end_pca = len(datablock) - 1
# start, end = int(beg_pca), int(end_pca)
# # # calculate direction/plane
# try:
# mpars = pmag.domean(datablock, start, end, calculation_type)
# except Exception as ex:
# print('-I- Problem with {}'.format(this_specimen))
# print(' ', ex)
# print(' Skipping')
# continue
# k += 1
# if mpars["specimen_direction_type"] != "Error":
# # put it on the plot
# pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
# if verbose:
# pmagplotlib.draw_figs(ZED)
if plots == 1 or specimen != "":
if plot_file == "":
basename = title
else:
basename = plot_file
files = {}
for key in list(ZED.keys()):
files[key] = basename + '_' + key + '.' + fmt
if pmagplotlib.isServer:
files[key] = basename + "TY:_{}_.".format(key) + fmt
pmagplotlib.save_plots(ZED, files)
if specimen != "":
sys.exit()
if verbose:
recnum = 0
for plotrec in datablock:
if units == 'T':
print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (
plotrec[5], recnum, plotrec[0] * 1e3, " mT", plotrec[3], plotrec[1], plotrec[2], plotrec[6]))
if units == "K":
print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (
plotrec[5], recnum, plotrec[0] - 273, ' C', plotrec[3], plotrec[1], plotrec[2], plotrec[6]))
if units == "J":
print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (
plotrec[5], recnum, plotrec[0], ' J', plotrec[3], plotrec[1], plotrec[2], plotrec[6]))
if 'K' in units and 'T' in units:
if plotrec[0] >= 1.:
print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (
plotrec[5], recnum, plotrec[0] - 273, ' C', plotrec[3], plotrec[1], plotrec[2], plotrec[6]))
if plotrec[0] < 1.:
print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (
plotrec[5], recnum, plotrec[0] * 1e3, " mT", plotrec[3], plotrec[1], plotrec[2], plotrec[6]))
recnum += 1
# we have a current interpretation
elif mpars["specimen_direction_type"] != "Error":
#
                # create a new specimen record for the interpretation for this
# specimen
this_specimen_interpretation = {
col: "" for col in specimen_cols}
# this_specimen_interpretation["analysts"]=user
this_specimen_interpretation['software_packages'] = version_num
this_specimen_interpretation['specimen'] = this_specimen
this_specimen_interpretation["method_codes"] = calculation_type
this_specimen_interpretation["meas_step_unit"] = units
this_specimen_interpretation["meas_step_min"] = tr[start]
this_specimen_interpretation["meas_step_max"] = tr[end]
this_specimen_interpretation["dir_dec"] = '%7.1f' % (
mpars['specimen_dec'])
this_specimen_interpretation["dir_inc"] = '%7.1f' % (
mpars['specimen_inc'])
if "specimen_dang" in mpars:
this_specimen_interpretation["dir_dang"] = '%7.1f' % (
mpars['specimen_dang'])
else:
this_specimen_interpretation["dir_dang"] = ''
this_specimen_interpretation["dir_n_measurements"] = '%i' % (
mpars['specimen_n'])
this_specimen_interpretation["dir_tilt_correction"] = coord
methods = methods.replace(" ", "")
if "T" in units:
methods = methods + ":LP-DIR-AF"
if "K" in units:
methods = methods + ":LP-DIR-T"
if "J" in units:
methods = methods + ":LP-DIR-M"
this_specimen_interpretation["method_codes"] = methods.strip(
':')
this_specimen_interpretation["experiments"] = this_specimen_measurements.experiment.unique()[
0]
#
# print some stuff
#
if calculation_type != 'DE-FM':
this_specimen_interpretation["dir_mad_free"] = '%7.1f' % (
mpars['specimen_mad'])
this_specimen_interpretation["dir_alpha95"] = ''
if verbose:
if units == 'K':
print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_mad_free"]), float(this_specimen_interpretation["dir_dang"]), float(
this_specimen_interpretation["meas_step_min"]) - 273, float(this_specimen_interpretation["meas_step_max"]) - 273, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
elif units == 'T':
print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_mad_free"]), float(this_specimen_interpretation["dir_dang"]), float(
this_specimen_interpretation["meas_step_min"]) * 1e3, float(this_specimen_interpretation["meas_step_max"]) * 1e3, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
elif 'T' in units and 'K' in units:
if float(this_specimen_interpretation['meas_step_min']) < 1.0:
min = float(
this_specimen_interpretation['meas_step_min']) * 1e3
else:
min = float(
this_specimen_interpretation['meas_step_min']) - 273
if float(this_specimen_interpretation['meas_step_max']) < 1.0:
max = float(
this_specimen_interpretation['meas_step_max']) * 1e3
else:
max = float(
this_specimen_interpretation['meas_step_max']) - 273
print('%s %i %7.1f %i %i %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_mad_free"]), float(
this_specimen_interpretation["dir_dang"]), min, max, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
else:
print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_mad_free"]), float(this_specimen_interpretation["dir_dang"]), float(
this_specimen_interpretation["meas_step_min"]), float(this_specimen_interpretation["meas_step_max"]), float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
else:
this_specimen_interpretation["dir_alpha95"] = '%7.1f' % (
mpars['specimen_alpha95'])
this_specimen_interpretation["dir_mad_free"] = ''
if verbose:
                        if units == 'K':
                            print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_alpha95"]), float(this_specimen_interpretation["dir_dang"]), float(
                                this_specimen_interpretation["meas_step_min"]) - 273, float(this_specimen_interpretation["meas_step_max"]) - 273, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
                        elif units == 'T':
print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_alpha95"]), float(this_specimen_interpretation["dir_dang"]), float(
this_specimen_interpretation["meas_step_min"]) * 1e3, float(this_specimen_interpretation["meas_step_max"]) * 1e3, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
elif 'T' in units and 'K' in units:
if float(this_specimen_interpretation['meas_step_min']) < 1.0:
min = float(
this_specimen_interpretation['meas_step_min']) * 1e3
else:
min = float(
this_specimen_interpretation['meas_step_min']) - 273
if float(this_specimen_interpretation['meas_step_max']) < 1.0:
max = float(
this_specimen_interpretation['meas_step_max']) * 1e3
else:
max = float(
this_specimen_interpretation['meas_step_max']) - 273
print('%s %i %7.1f %i %i %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(
this_specimen_interpretation["dir_alpha95"]), min, max, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
else:
print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_alpha95"]), float(
this_specimen_interpretation["meas_step_min"]), float(this_specimen_interpretation["meas_step_max"]), float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
if verbose:
saveit = input("Save this interpretation? [y]/n \n")
else:
print("no data", this_specimen)
if verbose:
pmagplotlib.draw_figs(ZED)
#res = input(' <return> for next specimen, [q]uit ')
res = input("S[a]ve plots, [q]uit, or <return> to continue ")
if res == 'a':
files = {plot_type: this_specimen + "_" + plot_type + "." + fmt for plot_type in ZED}
pmagplotlib.save_plots(ZED, files)
print("")
if res == 'q':
return
k += 1
#
if __name__ == "__main__":
main()
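# Illustrative usage note (added annotation, not part of the original script):
# a typical non-interactive run might look like
#   python zeq_magic.py -f measurements.txt -crd g -sav -fmt png
# which reads the default measurements file, plots in geographic coordinates
# and saves every plot as PNG without review; measurements.txt is the
# documented default, the other option values are chosen for illustration.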
|
unknown
|
codeparrot/codeparrot-clean
| ||
'''
Created on Jul 22, 2017
@author: matija
'''
from object_oriented.base import Sequence as seq
from collections.abc import Iterable
#C-2.25 impl
class Vector:
def __init__(self, d):
if isinstance(d, int):
self._coords = [0] * d
else:
try:
self._coords = [x for x in d]
except TypeError:
raise TypeError('invalid parameter type')
def __len__(self):
return len(self._coords)
def __add__(self, v):
res = Vector(self._coords)
if isinstance(v, Vector):
if len(res) != len(v):
raise ArithmeticError('vector dimensions must agree')
res._coords = [x+y for x,y in zip(res._coords, v._coords)]
elif isinstance(v, int):
res._coords = [x+v for x in res._coords]
else:
raise TypeError('invalid parameter type')
return res
def __mul__(self, v):
res = Vector(self._coords)
if isinstance(v, Vector):
if len(res) != len(v):
raise ArithmeticError('vector dimensions must agree')
res._coords = [x*y for x,y in zip(res._coords, v._coords)]
elif isinstance(v, int):
res._coords = [x*v for x in res._coords]
else:
raise TypeError('invalid parameter type')
return res
    def __getitem__(self, i):
        # delegate slices directly to the underlying list (the self-test below uses v1[0:3])
        if isinstance(i, slice):
            return self._coords[i]
        if i < 0:
            i = len(self) + i
        if not 0 <= i < len(self):
            raise IndexError('index out of bounds')
        return self._coords[i]
def __str__(self):
return str(self._coords)
#C-2.26
class reversedSeq(seq):
'''
Implemented as iterator instead of generator
Takes iterable as the constructor argument so we can reverse iterate string for example
'''
def __init__(self, source):
if not isinstance(source, Iterable):
raise TypeError('invalid parameter type')
self._iter = source
def __len__(self):
return len(self._iter)
def __getitem__(self, j):
j = len(self._iter) - j - 1
        if not 0 <= j < len(self):
raise StopIteration
return self._iter[j]
if __name__ == '__main__':
#test C-2.25
v1 = Vector([1,2,3])
print(v1[0:3])
print(v1[-1])
v2 = Vector([3,3,3])
print(v1+v2)
print(v1+5)
print(v1*2)
print(v1*v2)
#test C-2.26
for j in reversedSeq([1,2,3,4,5]):
print(j)
rseq = iter(reversedSeq('matija'))
while True:
try:
print(next(rseq))
except StopIteration:
print('No more values')
break
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.model.doc import addchild
class DocType:
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
def get_item_specification_details(self):
self.doclist = self.doc.clear_table(self.doclist, 'qa_specification_details')
specification = webnotes.conn.sql("select specification, value from `tabItem Quality Inspection Parameter` \
where parent = '%s' order by idx" % (self.doc.item_code))
for d in specification:
child = addchild(self.doc, 'qa_specification_details', 'Quality Inspection Reading', self.doclist)
child.specification = d[0]
child.value = d[1]
child.status = 'Accepted'
def on_submit(self):
if self.doc.purchase_receipt_no:
webnotes.conn.sql("update `tabPurchase Receipt Item` t1, `tabPurchase Receipt` t2 set t1.qa_no = '%s', t2.modified = '%s' \
where t1.parent = '%s' and t1.item_code = '%s' and t1.parent = t2.name" \
% (self.doc.name, self.doc.modified, self.doc.purchase_receipt_no, self.doc.item_code))
def on_cancel(self):
if self.doc.purchase_receipt_no:
webnotes.conn.sql("update `tabPurchase Receipt Item` t1, `tabPurchase Receipt` t2 set t1.qa_no = '', t2.modified = '%s' \
where t1.parent = '%s' and t1.item_code = '%s' and t1.parent = t2.name" \
% (self.doc.modified, self.doc.purchase_receipt_no, self.doc.item_code))
def item_query(doctype, txt, searchfield, start, page_len, filters):
if filters.get("from"):
from webnotes.widgets.reportview import get_match_cond
filters.update({
"txt": txt,
"mcond": get_match_cond(filters["from"], searchfield),
"start": start,
"page_len": page_len
})
return webnotes.conn.sql("""select item_code from `tab%(from)s`
where parent='%(parent)s' and docstatus < 2 and item_code like '%%%(txt)s%%' %(mcond)s
order by item_code limit %(start)s, %(page_len)s""" % filters)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (c) 2018 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockitousage;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import static org.assertj.core.api.Assertions.assertThat;
@ExtendWith(MockitoExtension.class)
class MultiLevelNestedTest {
@Mock private Runnable level1Mock;
@Nested
class Level2Class {
@Mock Runnable level2Mock;
@Test
void mocks_created() {
assertThat(level1Mock).isNotNull();
assertThat(level2Mock).isNotNull();
}
@Nested
class Level3Class {
@Mock Runnable level3Mock;
@Test
void mocks_created() {
assertThat(level1Mock).isNotNull();
assertThat(level2Mock).isNotNull();
assertThat(level3Mock).isNotNull();
}
}
}
}
|
java
|
github
|
https://github.com/mockito/mockito
|
mockito-extensions/mockito-junit-jupiter/src/test/java/org/mockitousage/MultiLevelNestedTest.java
|
from flask import Blueprint, render_template, redirect, url_for, current_app
from deployer import database as db
from deployer.routing.models import Route
from deployer.utils import xhr_form, allow_traffic, get_container_ip
from . import models, forms
views = Blueprint('apps', __name__, template_folder='templates')
def get_app(key):
query = db.Session().query(models.Application).filter_by(key=key)
return db.get_one_or_abort(query)
@views.route('/')
def index():
apps = db.Session().query(models.Application).all()
return render_template('applications.html', apps=apps)
@views.route('/<regex("[A-Z0-9]+"):app_key>/')
def app(app_key):
return render_template('application.html', app=get_app(app_key))
@views.route('/new/', methods=['GET', 'POST'], endpoint='new')
@views.route('/<regex("[A-Z0-9]+"):app_key>/edit/', methods=['GET', 'POST'])
@xhr_form
def edit(app_key=None):
create = app_key is None
app = None if create else get_app(app_key)
form = forms.ApplicationForm(obj=app)
if form.validate_on_submit():
session = db.Session()
if create:
app = models.Application()
form.populate_obj(app)
session.add(app)
session.commit()
return redirect(url_for('.app', app_key=app.key))
return render_template('edit-application.html',
form=form, create=create, app=app)
@views.route('/<regex("[A-Z0-9]+"):app_key>/builds/<build>/')
def build(app_key, build):
app = get_app(app_key)
build = db.get_one_or_abort(app.builds.filter_by(tag=build))
return render_template('build.html', app=app, build=build)
@views.route('/<regex("[A-Z0-9]+"):app_key>/templates/new/',
methods=['GET', 'POST'], endpoint='new_template')
@views.route('/<regex("[A-Z0-9]+"):app_key>/templates/<int:template_id>/edit/',
methods=['GET', 'POST'])
@xhr_form
def edit_template(app_key, template_id=None):
create = template_id is None
app = get_app(app_key)
if create:
template = None
else:
template = db.get_one_or_abort(app.templates.filter_by(id=template_id))
form = forms.TemplateForm(obj=template)
if form.validate_on_submit():
session = db.Session()
if create:
template = models.DeploymentTemplate(application=app)
form.populate_obj(template)
session.add(template)
session.commit()
return redirect(url_for('.app', app_key=app.key))
return render_template('edit-deployment-template.html',
form=form, create=create, app=app, tpl=template)
@views.route(
'/<regex("[A-Z0-9]+"):app_key>/templates/<int:template_id>/delete/',
methods=['GET', 'POST']
)
@xhr_form
def delete_template(app_key, template_id):
app = get_app(app_key)
template = db.get_one_or_abort(app.templates.filter_by(id=template_id))
form = forms.ConfirmationForm()
if form.validate_on_submit():
session = db.Session()
session.delete(template)
session.commit()
return redirect(url_for('.app', app_key=app.key))
return render_template('confirm-delete-template.html', form=form,
tpl=template)
@views.route('/<regex("[A-Z0-9]+"):app_key>/builds/<build>/deploy/',
methods=['GET', 'POST'])
@xhr_form
def deploy(app_key, build):
app = get_app(app_key)
build = db.get_one_or_abort(app.builds.filter_by(tag=build))
form = forms.DeploymentSetupForm(app)
if form.validate_on_submit():
instance = build.deploy(form.data['host'],
form.data['template'].template)
session = db.Session()
session.add(instance)
route = Route(instance=instance, routing_key=form.data['hostname'])
session.add(route)
client = instance.host.get_client()
child_ip = get_container_ip(client, instance.container)
parent_ip = get_container_ip(
client,
current_app.config['FRONTEND_NAME']
)
allow_traffic(parent_ip, child_ip, 5510)
session.commit()
route.update(current_app.config['FRONTEND_NAME'])
return redirect(url_for('.instance', app_key=app.key,
container_id=instance.container[:10]))
return render_template('deploy-setup.html', form=form, app=app,
build=build)
@views.route('/<regex("[A-Z0-9]+"):app_key>/instances/<container_id>/')
def instance(app_key, container_id):
app = get_app(app_key)
instance = db.get_one_or_abort(app.instances.filter(
models.Instance.container.startswith(container_id)))
return render_template('instance.html', app=app, instance=instance)
@views.route('/<regex("[A-Z0-9]+"):app_key>/instances/<container_id>/stop/',
methods=['GET', 'POST'])
@xhr_form
def stop(app_key, container_id):
app = get_app(app_key)
instance = db.get_one_or_abort(app.instances.filter(
models.Instance.container.startswith(container_id)))
form = forms.ConfirmationForm()
if form.validate_on_submit():
session = db.Session()
instance.stop()
for route in instance.routes:
route.update(current_app.config['FRONTEND_NAME'])
session.commit()
return redirect(url_for('.app', app_key=app.key))
return render_template('confirm-stop-instance.html', form=form,
instance=instance)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# (c) 2014, Maciej Delmanowski <drybjed@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from functools import partial
import types
try:
import netaddr
except ImportError:
# in this case, we'll make the filters return error messages (see bottom)
netaddr = None
else:
class mac_linux(netaddr.mac_unix):
pass
mac_linux.word_fmt = '%.2x'
from ansible import errors
# ---- IP address and network query helpers ----
def _empty_ipaddr_query(v, vtype):
# We don't have any query to process, so just check what type the user
# expects, and return the IP address in a correct format
if v:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
def _6to4_query(v, vtype, value):
if v.version == 4:
if v.size == 1:
ipconv = str(v.ip)
elif v.size > 1:
if v.ip != v.network:
ipconv = str(v.ip)
else:
ipconv = False
if ipaddr(ipconv, 'public'):
numbers = list(map(int, ipconv.split('.')))
try:
return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
except:
return False
elif v.version == 6:
if vtype == 'address':
if ipaddr(str(v), '2002::/16'):
return value
elif vtype == 'network':
if v.ip != v.network:
if ipaddr(str(v.ip), '2002::/16'):
return value
else:
return False
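# Illustrative note (added annotation, not part of the original filter set):
# for a public IPv4 address the '6to4' query embeds the four octets, in hex,
# after the 2002::/16 prefix, e.g. 8.8.8.8 maps to '2002:0808:0808::1/48'.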
def _ip_query(v):
if v.size == 1:
return str(v.ip)
if v.size > 1:
# /31 networks in netaddr have no broadcast address
if v.ip != v.network or not v.broadcast:
return str(v.ip)
def _gateway_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _bool_ipaddr_query(v):
if v:
return True
def _broadcast_query(v):
if v.size > 1:
return str(v.broadcast)
def _cidr_query(v):
return str(v)
def _cidr_lookup_query(v, iplist, value):
try:
if v in iplist:
return value
except:
return False
def _host_query(v):
if v.size == 1:
return str(v)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _hostmask_query(v):
return str(v.hostmask)
def _int_query(v, vtype):
if vtype == 'address':
return int(v.ip)
elif vtype == 'network':
return str(int(v.ip)) + '/' + str(int(v.prefixlen))
def _ipv4_query(v, value):
if v.version == 6:
try:
return str(v.ipv4())
except:
return False
else:
return value
def _ipv6_query(v, value):
if v.version == 4:
return str(v.ipv6())
else:
return value
def _link_local_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v.version == 4:
if ipaddr(str(v_ip), '169.254.0.0/16'):
return value
elif v.version == 6:
if ipaddr(str(v_ip), 'fe80::/10'):
return value
def _loopback_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_loopback():
return value
def _multicast_query(v, value):
if v.is_multicast():
return value
def _net_query(v):
if v.size > 1:
if v.ip == v.network:
return str(v.network) + '/' + str(v.prefixlen)
def _netmask_query(v):
return str(v.netmask)
def _network_query(v):
if v.size > 1:
return str(v.network)
def _prefix_query(v):
return int(v.prefixlen)
def _private_query(v, value):
if v.is_private():
return value
def _public_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_unicast() and not v_ip.is_private() and \
not v_ip.is_loopback() and not v_ip.is_netmask() and \
not v_ip.is_hostmask():
return value
def _revdns_query(v):
v_ip = netaddr.IPAddress(str(v.ip))
return v_ip.reverse_dns
def _size_query(v):
return v.size
def _subnet_query(v):
return str(v.cidr)
def _type_query(v):
if v.size == 1:
return 'address'
if v.size > 1:
if v.ip != v.network:
return 'address'
else:
return 'network'
def _unicast_query(v, value):
if v.is_unicast():
return value
def _version_query(v):
return v.version
def _wrap_query(v, vtype, value):
if v.version == 6:
if vtype == 'address':
return '[' + str(v.ip) + ']'
elif vtype == 'network':
return '[' + str(v.ip) + ']/' + str(v.prefixlen)
else:
return value
# ---- HWaddr query helpers ----
def _bare_query(v):
v.dialect = netaddr.mac_bare
return str(v)
def _bool_hwaddr_query(v):
if v:
return True
def _int_hwaddr_query(v):
return int(v)
def _cisco_query(v):
v.dialect = netaddr.mac_cisco
return str(v)
def _empty_hwaddr_query(v, value):
if v:
return value
def _linux_query(v):
v.dialect = mac_linux
return str(v)
def _postgresql_query(v):
v.dialect = netaddr.mac_pgsql
return str(v)
def _unix_query(v):
v.dialect = netaddr.mac_unix
return str(v)
def _win_query(v):
v.dialect = netaddr.mac_eui48
return str(v)
# ---- IP address and network filters ----
def ipaddr(value, query = '', version = False, alias = 'ipaddr'):
''' Check if string is an IP address or network and filter it '''
query_func_extra_args = {
'': ('vtype',),
'6to4': ('vtype', 'value'),
'cidr_lookup': ('iplist', 'value'),
'int': ('vtype',),
'ipv4': ('value',),
'ipv6': ('value',),
'link-local': ('value',),
'loopback': ('value',),
'lo': ('value',),
'multicast': ('value',),
'private': ('value',),
'public': ('value',),
'unicast': ('value',),
'wrap': ('vtype', 'value'),
}
query_func_map = {
'': _empty_ipaddr_query,
'6to4': _6to4_query,
'address': _ip_query,
'address/prefix': _gateway_query,
'bool': _bool_ipaddr_query,
'broadcast': _broadcast_query,
'cidr': _cidr_query,
'cidr_lookup': _cidr_lookup_query,
'gateway': _gateway_query,
'gw': _gateway_query,
'host': _host_query,
'host/prefix': _gateway_query,
'hostmask': _hostmask_query,
'hostnet': _gateway_query,
'int': _int_query,
'ip': _ip_query,
'ipv4': _ipv4_query,
'ipv6': _ipv6_query,
'link-local': _link_local_query,
'lo': _loopback_query,
'loopback': _loopback_query,
'multicast': _multicast_query,
'net': _net_query,
'netmask': _netmask_query,
'network': _network_query,
'prefix': _prefix_query,
'private': _private_query,
'public': _public_query,
'revdns': _revdns_query,
'router': _gateway_query,
'size': _size_query,
'subnet': _subnet_query,
'type': _type_query,
'unicast': _unicast_query,
'v4': _ipv4_query,
'v6': _ipv6_query,
'version': _version_query,
'wrap': _wrap_query,
}
vtype = None
if not value:
return False
elif value == True:
return False
# Check if value is a list and parse each element
elif isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, str(query), version):
_ret.append(ipaddr(element, str(query), version))
if _ret:
return _ret
else:
return list()
# Check if value is a number and convert it to an IP address
elif str(value).isdigit():
# We don't know what IP version to assume, so let's check IPv4 first,
# then IPv6
try:
if ((not version) or (version and version == 4)):
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = int(value)
v.prefixlen = 32
elif version and version == 6:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# IPv4 didn't work the first time, so it definitely has to be IPv6
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# The value is too big for IPv6. Are you a nanobot?
except:
return False
# We got an IP address, let's mark it as such
value = str(v)
vtype = 'address'
# value has not been recognized, check if it's a valid IP string
else:
try:
v = netaddr.IPNetwork(value)
# value is a valid IP string, check if user specified
# CIDR prefix or just an IP address, this will indicate default
# output format
try:
address, prefix = value.split('/')
vtype = 'network'
except:
vtype = 'address'
# value hasn't been recognized, maybe it's a numerical CIDR?
except:
try:
address, prefix = value.split('/')
address.isdigit()
address = int(address)
prefix.isdigit()
prefix = int(prefix)
# It's not numerical CIDR, give up
except:
return False
# It is something, so let's try and build a CIDR from the parts
try:
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv4 CIDR
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv6 CIDR. Give up.
except:
return False
# We have a valid CIDR, so let's write it in correct format
value = str(v)
vtype = 'network'
# We have a query string but it's not in the known query types. Check if
# that string is a valid subnet, if so, we can check later if given IP
# address/network is inside that specific subnet
try:
### ?? 6to4 and link-local were True here before. Should they still?
if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
query = 'cidr_lookup'
except:
pass
# This code checks if value matches the IP version the user wants, i.e. if
# it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
# If version does not match, return False
if version and v.version != version:
return False
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
try:
float(query)
if v.size == 1:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
elif v.size > 1:
try:
return str(v[query]) + '/' + str(v.prefixlen)
except:
return False
else:
return value
except:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
def ipwrap(value, query = ''):
try:
if isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, query, version = False, alias = 'ipwrap'):
_ret.append(ipaddr(element, 'wrap'))
else:
_ret.append(element)
return _ret
else:
_ret = ipaddr(value, query, version = False, alias = 'ipwrap')
if _ret:
return ipaddr(_ret, 'wrap')
else:
return value
except:
return value
def ipv4(value, query = ''):
return ipaddr(value, query, version = 4, alias = 'ipv4')
def ipv6(value, query = ''):
return ipaddr(value, query, version = 6, alias = 'ipv6')
# Split given subnet into smaller subnets or find out the biggest subnet of
# a given IP address with given CIDR prefix
# Usage:
#
# - address or address/prefix | ipsubnet
# returns CIDR subnet of a given input
#
# - address/prefix | ipsubnet(cidr)
# returns number of possible subnets for given CIDR prefix
#
# - address/prefix | ipsubnet(cidr, index)
# returns new subnet with given CIDR prefix
#
# - address | ipsubnet(cidr)
# returns biggest subnet with given CIDR prefix that address belongs to
#
# - address | ipsubnet(cidr, index)
# returns next indexed subnet which contains given address
def ipsubnet(value, query = '', index = 'x'):
''' Manipulate IPv4/IPv6 subnets '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return str(value)
elif str(query).isdigit():
vsize = ipaddr(v, 'size')
query = int(query)
try:
float(index)
index = int(index)
if vsize > 1:
try:
return str(list(value.subnet(query))[index])
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[index])
except:
return False
except:
if vsize > 1:
try:
return str(len(list(value.subnet(query))))
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[0])
except:
return False
return False
# Returns the nth host within a network described by value.
# Usage:
#
# - address or address/prefix | nthhost(nth)
# returns the nth host within the given network
def nthhost(value, query=''):
''' Get the nth host within a given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
nth = int(query)
if value.size > nth:
return value[nth]
except ValueError:
return False
return False
# Returns the SLAAC address within a network for a given HW/MAC address.
# Usage:
#
# - prefix | slaac(mac)
def slaac(value, query = ''):
''' Get the SLAAC address within given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
if ipaddr(value, 'version') != 6:
return False
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
mac = hwaddr(query, alias = 'slaac')
eui = netaddr.EUI(mac)
except:
return False
return eui.ipv6(value.network)
# ---- HWaddr / MAC address filters ----
def hwaddr(value, query = '', alias = 'hwaddr'):
''' Check if string is a HW/MAC address and filter it '''
query_func_extra_args = {
'': ('value',),
}
query_func_map = {
'': _empty_hwaddr_query,
'bare': _bare_query,
'bool': _bool_hwaddr_query,
'int': _int_hwaddr_query,
'cisco': _cisco_query,
'eui48': _win_query,
'linux': _linux_query,
'pgsql': _postgresql_query,
'postgresql': _postgresql_query,
'psql': _postgresql_query,
'unix': _unix_query,
'win': _win_query,
}
try:
v = netaddr.EUI(value)
except:
if query and query != 'bool':
raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
def macaddr(value, query = ''):
return hwaddr(value, query, alias = 'macaddr')
def _need_netaddr(f_name, *args, **kwargs):
raise errors.AnsibleFilterError('The {0} filter requires python-netaddr be'
' installed on the ansible controller'.format(f_name))
def ip4_hex(arg):
''' Convert an IPv4 address to Hexadecimal notation '''
numbers = list(map(int, arg.split('.')))
return '{:02x}{:02x}{:02x}{:02x}'.format(*numbers)
# ---- Ansible filters ----
class FilterModule(object):
''' IP address and network manipulation filters '''
filter_map = {
# IP addresses and networks
'ipaddr': ipaddr,
'ipwrap': ipwrap,
'ipv4': ipv4,
'ipv6': ipv6,
'ipsubnet': ipsubnet,
'nthhost': nthhost,
'slaac': slaac,
'ip4_hex': ip4_hex,
# MAC / HW addresses
'hwaddr': hwaddr,
'macaddr': macaddr
}
def filters(self):
if netaddr:
return self.filter_map
else:
# Need to install python-netaddr for these filters to work
return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#/*******************************************************************************
# * Copyright (C) 2021 Zhu Research Group @ Rutgers-Newark
# * All rights reserved.
# *
# * This file is part of fplib.
# *
# * Permission is hereby granted, free of charge, to any person obtaining a copy
# * of this software and associated documentation files (the "Software"), to deal
# * in the Software without restriction, including without limitation the rights
# * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# * copies of the Software, and to permit persons to whom the Software is
# * furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included in
# * all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# * THE SOFTWARE.
# * ****************************************************************************/
from .fplib import (get_version,
get_lfp,
get_sfp,
get_nfp,
get_fp_dist)
__version__ = "%d.%d.%d" % get_version()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class filterpolicy_binding(base_resource):
""" Binding class showing the resources that can be bound to filterpolicy_binding.
"""
def __init__(self) :
self._name = ""
self.filterpolicy_csvserver_binding = []
self.filterpolicy_lbvserver_binding = []
self.filterpolicy_crvserver_binding = []
self.filterpolicy_filterglobal_binding = []
@property
def name(self) :
"""Name of the filter policy to be displayed. If a name is not provided, information about all the filter policies is shown.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the filter policy to be displayed. If a name is not provided, information about all the filter policies is shown.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def filterpolicy_crvserver_bindings(self) :
"""crvserver that can be bound to filterpolicy.
"""
try :
return self._filterpolicy_crvserver_binding
except Exception as e:
raise e
@property
def filterpolicy_lbvserver_bindings(self) :
"""lbvserver that can be bound to filterpolicy.
"""
try :
return self._filterpolicy_lbvserver_binding
except Exception as e:
raise e
@property
def filterpolicy_csvserver_bindings(self) :
"""csvserver that can be bound to filterpolicy.
"""
try :
return self._filterpolicy_csvserver_binding
except Exception as e:
raise e
@property
def filterpolicy_filterglobal_bindings(self) :
"""filterglobal that can be bound to filterpolicy.
"""
try :
return self._filterpolicy_filterglobal_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(filterpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.filterpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name) :
""" Use this API to fetch filterpolicy_binding resource.
"""
try :
if type(name) is not list :
obj = filterpolicy_binding()
obj.name = name
response = obj.get_resource(service)
else :
if name and len(name) > 0 :
obj = [filterpolicy_binding() for _ in range(len(name))]
response = [None] * len(name)
for i in range(len(name)) :
obj[i].name = name[i]
response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
class filterpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.filterpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.filterpolicy_binding = [filterpolicy_binding() for _ in range(length)]
|
unknown
|
codeparrot/codeparrot-clean
| ||
//
// Code generated by grafana-app-sdk. DO NOT EDIT.
//
package v1beta1
import (
"fmt"
"github.com/grafana/grafana-app-sdk/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"time"
)
// +k8s:openapi-gen=true
type Folder struct {
metav1.TypeMeta `json:",inline" yaml:",inline"`
metav1.ObjectMeta `json:"metadata" yaml:"metadata"`
// Spec is the spec of the Folder
Spec FolderSpec `json:"spec" yaml:"spec"`
}
func NewFolder() *Folder {
return &Folder{
Spec: *NewFolderSpec(),
}
}
func (o *Folder) GetSpec() any {
return o.Spec
}
func (o *Folder) SetSpec(spec any) error {
cast, ok := spec.(FolderSpec)
if !ok {
return fmt.Errorf("cannot set spec type %#v, not of type Spec", spec)
}
o.Spec = cast
return nil
}
func (o *Folder) GetSubresources() map[string]any {
return map[string]any{}
}
func (o *Folder) GetSubresource(name string) (any, bool) {
switch name {
default:
return nil, false
}
}
func (o *Folder) SetSubresource(name string, value any) error {
switch name {
default:
return fmt.Errorf("subresource '%s' does not exist", name)
}
}
func (o *Folder) GetStaticMetadata() resource.StaticMetadata {
gvk := o.GroupVersionKind()
return resource.StaticMetadata{
Name: o.ObjectMeta.Name,
Namespace: o.ObjectMeta.Namespace,
Group: gvk.Group,
Version: gvk.Version,
Kind: gvk.Kind,
}
}
func (o *Folder) SetStaticMetadata(metadata resource.StaticMetadata) {
o.Name = metadata.Name
o.Namespace = metadata.Namespace
o.SetGroupVersionKind(schema.GroupVersionKind{
Group: metadata.Group,
Version: metadata.Version,
Kind: metadata.Kind,
})
}
func (o *Folder) GetCommonMetadata() resource.CommonMetadata {
dt := o.DeletionTimestamp
var deletionTimestamp *time.Time
if dt != nil {
deletionTimestamp = &dt.Time
}
// Legacy ExtraFields support
extraFields := make(map[string]any)
if o.Annotations != nil {
extraFields["annotations"] = o.Annotations
}
if o.ManagedFields != nil {
extraFields["managedFields"] = o.ManagedFields
}
if o.OwnerReferences != nil {
extraFields["ownerReferences"] = o.OwnerReferences
}
return resource.CommonMetadata{
UID: string(o.UID),
ResourceVersion: o.ResourceVersion,
Generation: o.Generation,
Labels: o.Labels,
CreationTimestamp: o.CreationTimestamp.Time,
DeletionTimestamp: deletionTimestamp,
Finalizers: o.Finalizers,
UpdateTimestamp: o.GetUpdateTimestamp(),
CreatedBy: o.GetCreatedBy(),
UpdatedBy: o.GetUpdatedBy(),
ExtraFields: extraFields,
}
}
func (o *Folder) SetCommonMetadata(metadata resource.CommonMetadata) {
o.UID = types.UID(metadata.UID)
o.ResourceVersion = metadata.ResourceVersion
o.Generation = metadata.Generation
o.Labels = metadata.Labels
o.CreationTimestamp = metav1.NewTime(metadata.CreationTimestamp)
if metadata.DeletionTimestamp != nil {
dt := metav1.NewTime(*metadata.DeletionTimestamp)
o.DeletionTimestamp = &dt
} else {
o.DeletionTimestamp = nil
}
o.Finalizers = metadata.Finalizers
if o.Annotations == nil {
o.Annotations = make(map[string]string)
}
if !metadata.UpdateTimestamp.IsZero() {
o.SetUpdateTimestamp(metadata.UpdateTimestamp)
}
if metadata.CreatedBy != "" {
o.SetCreatedBy(metadata.CreatedBy)
}
if metadata.UpdatedBy != "" {
o.SetUpdatedBy(metadata.UpdatedBy)
}
// Legacy support for setting Annotations, ManagedFields, and OwnerReferences via ExtraFields
if metadata.ExtraFields != nil {
if annotations, ok := metadata.ExtraFields["annotations"]; ok {
if cast, ok := annotations.(map[string]string); ok {
o.Annotations = cast
}
}
if managedFields, ok := metadata.ExtraFields["managedFields"]; ok {
if cast, ok := managedFields.([]metav1.ManagedFieldsEntry); ok {
o.ManagedFields = cast
}
}
if ownerReferences, ok := metadata.ExtraFields["ownerReferences"]; ok {
if cast, ok := ownerReferences.([]metav1.OwnerReference); ok {
o.OwnerReferences = cast
}
}
}
}
func (o *Folder) GetCreatedBy() string {
if o.ObjectMeta.Annotations == nil {
o.ObjectMeta.Annotations = make(map[string]string)
}
return o.ObjectMeta.Annotations["grafana.com/createdBy"]
}
func (o *Folder) SetCreatedBy(createdBy string) {
if o.ObjectMeta.Annotations == nil {
o.ObjectMeta.Annotations = make(map[string]string)
}
o.ObjectMeta.Annotations["grafana.com/createdBy"] = createdBy
}
func (o *Folder) GetUpdateTimestamp() time.Time {
if o.ObjectMeta.Annotations == nil {
o.ObjectMeta.Annotations = make(map[string]string)
}
parsed, _ := time.Parse(time.RFC3339, o.ObjectMeta.Annotations["grafana.com/updateTimestamp"])
return parsed
}
func (o *Folder) SetUpdateTimestamp(updateTimestamp time.Time) {
if o.ObjectMeta.Annotations == nil {
o.ObjectMeta.Annotations = make(map[string]string)
}
o.ObjectMeta.Annotations["grafana.com/updateTimestamp"] = updateTimestamp.Format(time.RFC3339)
}
func (o *Folder) GetUpdatedBy() string {
if o.ObjectMeta.Annotations == nil {
o.ObjectMeta.Annotations = make(map[string]string)
}
return o.ObjectMeta.Annotations["grafana.com/updatedBy"]
}
func (o *Folder) SetUpdatedBy(updatedBy string) {
if o.ObjectMeta.Annotations == nil {
o.ObjectMeta.Annotations = make(map[string]string)
}
o.ObjectMeta.Annotations["grafana.com/updatedBy"] = updatedBy
}
func (o *Folder) Copy() resource.Object {
return resource.CopyObject(o)
}
func (o *Folder) DeepCopyObject() runtime.Object {
return o.Copy()
}
func (o *Folder) DeepCopy() *Folder {
cpy := &Folder{}
o.DeepCopyInto(cpy)
return cpy
}
func (o *Folder) DeepCopyInto(dst *Folder) {
dst.TypeMeta.APIVersion = o.TypeMeta.APIVersion
dst.TypeMeta.Kind = o.TypeMeta.Kind
o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta)
o.Spec.DeepCopyInto(&dst.Spec)
}
func (Folder) OpenAPIModelName() string {
return "com.github.grafana.grafana.apps.folder.pkg.apis.folder.v1beta1.Folder"
}
// Interface compliance compile-time check
var _ resource.Object = &Folder{}
// +k8s:openapi-gen=true
type FolderList struct {
metav1.TypeMeta `json:",inline" yaml:",inline"`
metav1.ListMeta `json:"metadata" yaml:"metadata"`
Items []Folder `json:"items" yaml:"items"`
}
func (o *FolderList) DeepCopyObject() runtime.Object {
return o.Copy()
}
func (o *FolderList) Copy() resource.ListObject {
cpy := &FolderList{
TypeMeta: o.TypeMeta,
Items: make([]Folder, len(o.Items)),
}
o.ListMeta.DeepCopyInto(&cpy.ListMeta)
for i := 0; i < len(o.Items); i++ {
if item, ok := o.Items[i].Copy().(*Folder); ok {
cpy.Items[i] = *item
}
}
return cpy
}
func (o *FolderList) GetItems() []resource.Object {
items := make([]resource.Object, len(o.Items))
for i := 0; i < len(o.Items); i++ {
items[i] = &o.Items[i]
}
return items
}
func (o *FolderList) SetItems(items []resource.Object) {
o.Items = make([]Folder, len(items))
for i := 0; i < len(items); i++ {
o.Items[i] = *items[i].(*Folder)
}
}
func (o *FolderList) DeepCopy() *FolderList {
cpy := &FolderList{}
o.DeepCopyInto(cpy)
return cpy
}
func (o *FolderList) DeepCopyInto(dst *FolderList) {
resource.CopyObjectInto(dst, o)
}
func (FolderList) OpenAPIModelName() string {
return "com.github.grafana.grafana.apps.folder.pkg.apis.folder.v1beta1.FolderList"
}
// Interface compliance compile-time check
var _ resource.ListObject = &FolderList{}
// Copy methods for all subresource types
// DeepCopy creates a full deep copy of Spec
func (s *FolderSpec) DeepCopy() *FolderSpec {
cpy := &FolderSpec{}
s.DeepCopyInto(cpy)
return cpy
}
// DeepCopyInto deep copies Spec into another Spec object
func (s *FolderSpec) DeepCopyInto(dst *FolderSpec) {
resource.CopyObjectInto(dst, s)
}
|
go
|
github
|
https://github.com/grafana/grafana
|
apps/folder/pkg/apis/folder/v1beta1/folder_object_gen.go
|
# -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import datetime
import os
from ansible.module_utils.urls import Request, open_url, urllib_request, HAS_SSLCONTEXT, cookiejar, ConnectionError, RequestWithMethod
from ansible.module_utils.urls import SSLValidationHandler, HTTPSClientAuthHandler, RedirectHandlerFactory
import pytest
from mock import call
if HAS_SSLCONTEXT:
import ssl
@pytest.fixture
def urlopen_mock(mocker):
return mocker.patch('ansible.module_utils.urls.urllib_request.urlopen')
@pytest.fixture
def install_opener_mock(mocker):
return mocker.patch('ansible.module_utils.urls.urllib_request.install_opener')
def test_Request_fallback(urlopen_mock, install_opener_mock, mocker):
cookies = cookiejar.CookieJar()
request = Request(
headers={'foo': 'bar'},
use_proxy=False,
force=True,
timeout=100,
validate_certs=False,
url_username='user',
url_password='passwd',
http_agent='ansible-tests',
force_basic_auth=True,
follow_redirects='all',
client_cert='/tmp/client.pem',
client_key='/tmp/client.key',
cookies=cookies,
)
fallback_mock = mocker.spy(request, '_fallback')
r = request.open('GET', 'https://ansible.com')
calls = [
call(None, False), # use_proxy
call(None, True), # force
call(None, 100), # timeout
call(None, False), # validate_certs
call(None, 'user'), # url_username
call(None, 'passwd'), # url_password
call(None, 'ansible-tests'), # http_agent
call(None, True), # force_basic_auth
call(None, 'all'), # follow_redirects
call(None, '/tmp/client.pem'), # client_cert
call(None, '/tmp/client.key'), # client_key
call(None, cookies), # cookies
]
fallback_mock.assert_has_calls(calls)
assert fallback_mock.call_count == 12 # All but headers use fallback
args = urlopen_mock.call_args[0]
assert args[1] is None # data, this is handled in the Request not urlopen
assert args[2] == 100 # timeout
req = args[0]
assert req.headers == {
'Authorization': b'Basic dXNlcjpwYXNzd2Q=',
'Cache-control': 'no-cache',
'Foo': 'bar',
'User-agent': 'ansible-tests'
}
assert req.data is None
assert req.get_method() == 'GET'
def test_Request_open(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/')
args = urlopen_mock.call_args[0]
assert args[1] is None # data, this is handled in the Request not urlopen
assert args[2] == 10 # timeout
req = args[0]
assert req.headers == {}
assert req.data is None
assert req.get_method() == 'GET'
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
SSLValidationHandler,
RedirectHandlerFactory(), # factory, get handler
)
found_handlers = []
for handler in handlers:
if isinstance(handler, SSLValidationHandler) or handler.__class__.__name__ == 'RedirectHandler':
found_handlers.append(handler)
assert len(found_handlers) == 2
def test_Request_open_http(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/')
args = urlopen_mock.call_args[0]
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
found_handlers = []
for handler in handlers:
if isinstance(handler, SSLValidationHandler):
found_handlers.append(handler)
assert len(found_handlers) == 0
def test_Request_open_ftp(urlopen_mock, install_opener_mock, mocker):
mocker.patch('ansible.module_utils.urls.ParseResultDottedDict.as_list', side_effect=AssertionError)
# Using ftp scheme should prevent the AssertionError side effect to fire
r = Request().open('GET', 'ftp://foo@ansible.com/')
def test_Request_open_headers(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/', headers={'Foo': 'bar'})
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers == {'Foo': 'bar'}
def test_Request_open_username(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/', url_username='user')
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert len(found_handlers) == 2
assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user', None)}
def test_Request_open_username_in_url(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://user2@ansible.com/')
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user2', '')}
def test_Request_open_username_force_basic(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/', url_username='user', url_password='passwd', force_basic_auth=True)
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert len(found_handlers) == 0
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('Authorization') == b'Basic dXNlcjpwYXNzd2Q='
def test_Request_open_auth_in_netloc(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://user:passwd@ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert req.get_full_url() == 'http://ansible.com/'
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert len(found_handlers) == 2
def test_Request_open_netrc(urlopen_mock, install_opener_mock, monkeypatch):
here = os.path.dirname(__file__)
monkeypatch.setenv('NETRC', os.path.join(here, 'fixtures/netrc'))
r = Request().open('GET', 'http://ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('Authorization') == b'Basic dXNlcjpwYXNzd2Q='
r = Request().open('GET', 'http://foo.ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert 'Authorization' not in req.headers
monkeypatch.setenv('NETRC', os.path.join(here, 'fixtures/netrc.nonexistant'))
r = Request().open('GET', 'http://ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert 'Authorization' not in req.headers
def test_Request_open_no_proxy(urlopen_mock, install_opener_mock, mocker):
build_opener_mock = mocker.patch('ansible.module_utils.urls.urllib_request.build_opener')
r = Request().open('GET', 'http://ansible.com/', use_proxy=False)
handlers = build_opener_mock.call_args[0]
found_handlers = []
for handler in handlers:
if isinstance(handler, urllib_request.ProxyHandler):
found_handlers.append(handler)
assert len(found_handlers) == 1
@pytest.mark.skipif(not HAS_SSLCONTEXT, reason="requires SSLContext")
def test_Request_open_no_validate_certs(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/', validate_certs=False)
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
ssl_handler = None
for handler in handlers:
if isinstance(handler, HTTPSClientAuthHandler):
ssl_handler = handler
break
assert ssl_handler is not None
context = ssl_handler._context
assert context.protocol == ssl.PROTOCOL_SSLv23
if ssl.OP_NO_SSLv2:
assert context.options & ssl.OP_NO_SSLv2
assert context.options & ssl.OP_NO_SSLv3
assert context.verify_mode == ssl.CERT_NONE
assert context.check_hostname is False
def test_Request_open_client_cert(urlopen_mock, install_opener_mock):
here = os.path.dirname(__file__)
client_cert = os.path.join(here, 'fixtures/client.pem')
client_key = os.path.join(here, 'fixtures/client.key')
r = Request().open('GET', 'https://ansible.com/', client_cert=client_cert, client_key=client_key)
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
ssl_handler = None
for handler in handlers:
if isinstance(handler, HTTPSClientAuthHandler):
ssl_handler = handler
break
assert ssl_handler is not None
assert ssl_handler.client_cert == client_cert
assert ssl_handler.client_key == client_key
https_connection = ssl_handler._build_https_connection('ansible.com')
assert https_connection.key_file == client_key
assert https_connection.cert_file == client_cert
def test_Request_open_cookies(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/', cookies=cookiejar.CookieJar())
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
cookies_handler = None
for handler in handlers:
if isinstance(handler, urllib_request.HTTPCookieProcessor):
cookies_handler = handler
break
assert cookies_handler is not None
def test_Request_open_invalid_method(urlopen_mock, install_opener_mock):
with pytest.raises(ConnectionError):
r = Request().open('BOGUS', 'https://ansible.com/')
def test_Request_open_custom_method(urlopen_mock, install_opener_mock):
r = Request().open('DELETE', 'https://ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert isinstance(req, RequestWithMethod)
def test_Request_open_user_agent(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/', http_agent='ansible-tests')
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('User-agent') == 'ansible-tests'
def test_Request_open_force(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/', force=True, last_mod_time=datetime.datetime.now())
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('Cache-control') == 'no-cache'
assert 'If-modified-since' not in req.headers
def test_Request_open_last_mod(urlopen_mock, install_opener_mock):
now = datetime.datetime.now()
r = Request().open('GET', 'https://ansible.com/', last_mod_time=now)
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('If-modified-since') == now.strftime('%a, %d %b %Y %H:%M:%S +0000')
def test_Request_open_headers_not_dict(urlopen_mock, install_opener_mock):
with pytest.raises(ValueError):
Request().open('GET', 'https://ansible.com/', headers=['bob'])
def test_Request_init_headers_not_dict(urlopen_mock, install_opener_mock):
with pytest.raises(ValueError):
Request(headers=['bob'])
@pytest.mark.parametrize('method,kwargs', [
('get', {}),
('options', {}),
('head', {}),
('post', {'data': None}),
('put', {'data': None}),
('patch', {'data': None}),
('delete', {}),
])
def test_methods(method, kwargs, mocker):
expected = method.upper()
open_mock = mocker.patch('ansible.module_utils.urls.Request.open')
request = Request()
getattr(request, method)('https://ansible.com')
open_mock.assert_called_once_with(expected, 'https://ansible.com', **kwargs)
def test_open_url(urlopen_mock, install_opener_mock, mocker):
req_mock = mocker.patch('ansible.module_utils.urls.Request.open')
open_url('https://ansible.com/')
req_mock.assert_called_once_with('GET', 'https://ansible.com/', data=None, headers=None, use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None,
force_basic_auth=False, follow_redirects='urllib2',
client_cert=None, client_key=None, cookies=None)
|
unknown
|
codeparrot/codeparrot-clean
| ||
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
use Symfony\Bundle\FrameworkBundle\FrameworkBundle;
use Symfony\Bundle\SecurityBundle\SecurityBundle;
use Symfony\Bundle\SecurityBundle\Tests\Functional\Bundle\RememberMeBundle\RememberMeBundle;
return [
new FrameworkBundle(),
new SecurityBundle(),
new RememberMeBundle(),
];
|
php
|
github
|
https://github.com/symfony/symfony
|
src/Symfony/Bundle/SecurityBundle/Tests/Functional/app/RememberMe/bundles.php
|
# -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from jinja2.utils import CodeType, missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
# how does the raise helper look like?
try:
exec "raise TypeError, 'foo'"
except SyntaxError:
raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
def _set_tb_next(self, next):
if tb_set_next is not None:
tb_set_next(self.tb, next and next.tb or None)
self._tb_next = next
def _get_tb_next(self):
return self._tb_next
tb_next = property(_get_tb_next, _set_tb_next)
del _get_tb_next, _set_tb_next
@property
def is_jinja_frame(self):
return '__jinja_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
return getattr(self.tb, name)
class ProcessedTraceback(object):
"""Holds a Jinja preprocessed traceback for priting or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
def chain_frames(self):
"""Chains the frames. Requires ctypes or the debugsupport extension."""
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.tb_next = tb
prev_tb = tb
prev_tb.tb_next = None
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
return self.exc_type, self.exc_value, self.frames[0].tb
def make_traceback(exc_info, source_hint=None):
"""Creates a processed traceback object from the exc_info."""
exc_type, exc_value, tb = exc_info
if isinstance(exc_value, TemplateSyntaxError):
exc_info = translate_syntax_error(exc_value, source_hint)
initial_skip = 0
else:
initial_skip = 1
return translate_exception(exc_info, initial_skip)
def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in xrange(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(TracebackFrameProxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to
# reraise it unchanged.
# XXX: can we backup here? when could this happen?
if not frames:
raise exc_info[0], exc_info[1], exc_info[2]
traceback = ProcessedTraceback(exc_info[0], exc_info[1], frames)
if tb_set_next is not None:
traceback.chain_frames()
return traceback
def fake_exc_info(exc_info, filename, lineno):
"""Helper for `translate_exception`."""
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
real_locals = tb.tb_frame.f_locals.copy()
ctx = real_locals.get('context')
if ctx:
locals = ctx.get_all()
else:
locals = {}
for name, value in real_locals.iteritems():
if name.startswith('l_') and value is not missing:
locals[name[2:]] = value
# if there is a local called __jinja_exception__, we get
# rid of it to not break the debug functionality.
locals.pop('__jinja_exception__', None)
else:
locals = {}
# assemble fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__jinja_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Jinja
# frame for the ProcessedTraceback
'__jinja_template__': None
}
# and fake the exception
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == 'root':
location = 'top-level template code'
elif function.startswith('block_'):
location = 'block "%s"' % function[6:]
else:
location = 'template'
code = CodeType(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except:
pass
# execute the code and catch the new traceback
try:
exec code in globals, locals
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
any python traceback object.
"""
import ctypes
from types import TracebackType
# figure out size of _Py_ssize_t
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
_Py_ssize_t = ctypes.c_int64
else:
_Py_ssize_t = ctypes.c_int
# regular python
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
# python with trace
if hasattr(sys, 'getobjects'):
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('_ob_next', ctypes.POINTER(_PyObject)),
('_ob_prev', ctypes.POINTER(_PyObject)),
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
class _Traceback(_PyObject):
pass
_Traceback._fields_ = [
('tb_next', ctypes.POINTER(_Traceback)),
('tb_frame', ctypes.POINTER(_PyObject)),
('tb_lasti', ctypes.c_int),
('tb_lineno', ctypes.c_int)
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
# try to get a tb_set_next implementation
try:
from jinja2._debugsupport import tb_set_next
except ImportError:
try:
tb_set_next = _init_ugly_crap()
except:
tb_set_next = None
del _init_ugly_crap
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v0alpha1",
"metadata": {
"name": "v40.refresh_empty_string.v42"
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"panels": [],
"refresh": "",
"schemaVersion": 42,
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Empty String Refresh Test Dashboard",
"weekStart": ""
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
}
|
json
|
github
|
https://github.com/grafana/grafana
|
apps/dashboard/pkg/migration/conversion/testdata/migrated_dashboards_output/v1beta1-mig-v40.refresh_empty_string.v42.v0alpha1.json
|
"""Mutual exclusion -- for use with module sched
A mutex has two pieces of state -- a 'locked' bit and a queue.
When the mutex is not locked, the queue is empty.
Otherwise, the queue contains 0 or more (function, argument) pairs
representing functions (or methods) waiting to acquire the lock.
When the mutex is unlocked while the queue is not empty,
the first queue entry is removed and its function(argument) pair called,
implying it now has the lock.
Of course, no multi-threading is implied -- hence the funny interface
for lock, where a function is called once the lock is acquired.
"""
from warnings import warnpy3k
warnpy3k("the mutex module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
from collections import deque
class mutex:
def __init__(self):
"""Create a new mutex -- initially unlocked."""
self.locked = 0
self.queue = deque()
def test(self):
"""Test the locked bit of the mutex."""
return self.locked
def testandset(self):
"""Atomic test-and-set -- grab the lock if it is not set,
return True if it succeeded."""
if not self.locked:
self.locked = 1
return True
else:
return False
def lock(self, function, argument):
"""Lock a mutex, call the function with supplied argument
when it is acquired. If the mutex is already locked, place
function and argument in the queue."""
if self.testandset():
function(argument)
else:
self.queue.append((function, argument))
def unlock(self):
"""Unlock a mutex. If the queue is not empty, call the next
function with its argument."""
if self.queue:
function, argument = self.queue.popleft()
function(argument)
else:
self.locked = 0
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Copyright (C) 2012 Alan J Lockett
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class fact(object):
"""
A memoized version of factorial.
In general, scipy's implementation of the gamma function may be
a better idea to use (or gammaln for large numbers).
"""
def __init__(self):
self.stored = [1,1,2,6]
def __call__(self, n):
n = int(n)
if n < 0: raise Exception, "Bad input to factorial"
if n < len(self.stored):
return self.stored[n]
return n * self(n-1)
def choose(self, n,m):
return self(n) / (self(m) * self(n-m))
factorial = fact()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Angular coding style guide
## Introduction
This guide covers a range of style conventions for Angular application code. These recommendations
are not required for Angular to work, but instead establish a set of coding practices that promote
consistency across the Angular ecosystem. A consistent set of practices makes it easier to share
code and move between projects.
This guide does _not_ cover TypeScript or general coding practices unrelated to Angular. For
TypeScript, check
out [Google's TypeScript style guide](https://google.github.io/styleguide/tsguide.html).
### When in doubt, prefer consistency
Whenever you encounter a situation in which these rules contradict the style of a particular file,
prioritize maintaining consistency within a file. Mixing different style conventions in a single
file creates more confusion than diverging from the recommendations in this guide.
## Naming
### Separate words in file names with hyphens
Separate words within a file name with hyphens (`-`). For example, a component named `UserProfile`
has a file name `user-profile.ts`.
### Use the same name for a file's tests with `.spec` at the end
For unit tests, end file names with `.spec.ts`. For example, the unit test file for
the `UserProfile` component has the file name `user-profile.spec.ts`.
### Match file names to the TypeScript identifier within
File names should generally describe the contents of the code in the file. When the file contains a
TypeScript class, the file name should reflect that class name. For example, a file containing a
component named `UserProfile` has the name `user-profile.ts`.
If the file contains more than one primary nameable identifier, choose a name that describes the
common theme to the code within. If the code in a file does not fit within a common theme or feature
area, consider breaking the code up into different files. Avoid overly generic file names
like `helpers.ts`, `utils.ts`, or `common.ts`.
### Use the same file name for a component's TypeScript, template, and styles
Components typically consist of one TypeScript file, one template file, and one style file. These
files should share the same name with different file extensions. For example, a `UserProfile`
component can have the files `user-profile.ts`, `user-profile.html`, and `user-profile.css`.
If a component has more than one style file, append the name with additional words that describe the
styles specific to that file. For example, `UserProfile` might have style
files `user-profile-settings.css` and `user-profile-subscription.css`.
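For illustration, a component following this convention might reference its sibling files like this. This is only a sketch; it assumes the template and style files sit next to `user-profile.ts`:

```ts
import {Component} from '@angular/core';

// Assumes user-profile.html, user-profile-settings.css, and
// user-profile-subscription.css live alongside user-profile.ts.
@Component({
  selector: 'user-profile',
  templateUrl: './user-profile.html',
  styleUrls: ['./user-profile-settings.css', './user-profile-subscription.css'],
})
export class UserProfile {}
```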
## Project structure
### All the application's code goes in a directory named `src`
All of your Angular UI code (TypeScript, HTML, and styles) should live inside a directory
named `src`. Code that's not related to UI, such as configuration files or scripts, should live
outside the `src` directory.
This keeps the root application directory consistent between different Angular projects and creates
a clear separation between UI code and other code in your project.
### Bootstrap your application in a file named `main.ts` directly inside `src`
The code to start up, or **bootstrap**, an Angular application should always live in a file
named `main.ts`. This represents the primary entry point to the application.
### Group closely related files together in the same directory
Angular components consist of a TypeScript file and, optionally, a template and one or more style
files. You should group these together in the same directory.
Unit tests should live in the same directory as the code-under-test. Avoid collecting unrelated
tests into a single `tests` directory.
### Organize your project by feature areas
Organize your project into subdirectories based on the features of your application or common themes
to the code in those directories. For example, the project structure for a movie theater site,
MovieReel, might look like this:
```
src/
├─ movie-reel/
│ ├─ show-times/
│ │ ├─ film-calendar/
│ │ ├─ film-details/
│ ├─ reserve-tickets/
│ │ ├─ payment-info/
│ │ ├─ purchase-confirmation/
```
Avoid creating subdirectories based on the type of code that lives in those directories. For
example, avoid creating directories like `components`, `directives`, and `services`.
Avoid putting so many files into one directory that it becomes hard to read or navigate. As the
number of files in a directory grows, consider splitting further into additional sub-directories.
### One concept per file
Prefer focusing source files on a single _concept_. For Angular classes specifically, this usually
means one component, directive, or service per file. However, it's okay if a file contains more than
one component or directive if your classes are relatively small and they tie together as part of a
single concept.
When in doubt, go with the approach that leads to smaller files.
## Dependency injection
### Prefer the `inject` function over constructor parameter injection
Prefer using the [`inject`](/api/core/inject) function over injecting constructor parameters. The [`inject`](/api/core/inject) function works the same way as constructor parameter injection, but offers several style advantages:
- [`inject`](/api/core/inject) is generally more readable, especially when a class injects many dependencies.
- It's more syntactically straightforward to add comments to injected dependencies.
- [`inject`](/api/core/inject) offers better type inference.
- When targeting ES2022+ with [`useDefineForClassFields`](https://www.typescriptlang.org/tsconfig/#useDefineForClassFields), you can avoid separating field declaration and initialization for fields that read injected dependencies.
[You can refactor existing code to `inject` with an automatic tool](reference/migrations/inject-function).
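As a rough sketch of the difference (the `UserDataClient` service below is hypothetical), the same dependency can be declared with [`inject`](/api/core/inject) instead of a constructor parameter:

```ts
import {Component, inject} from '@angular/core';
import {UserDataClient} from './user-data-client'; // hypothetical service

@Component({selector: 'user-profile', template: ''})
export class UserProfile {
  // Preferred: declaration and initialization happen in one place.
  private readonly userData = inject(UserDataClient);

  // The equivalent constructor-parameter style this section recommends moving away from:
  // constructor(private readonly userData: UserDataClient) {}
}
```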
## Components and directives
### Choosing component selectors
See
the [Components guide for details on choosing component selectors](guide/components/selectors#choosing-a-selector).
### Naming component and directive members
See the Components guide for details
on [naming input properties](guide/components/inputs#choosing-input-names)
and [naming output properties](guide/components/outputs#choosing-event-names).
### Choosing directive selectors
Directives should use the
same [application-specific prefix](guide/components/selectors#selector-prefixes)
as your components.
When using an attribute selector for a directive, use a camelCase attribute name. For example, if
your application is named "MovieReel" and you build a directive that adds a tooltip to an element,
you might use the selector `[mrTooltip]`.
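A minimal sketch of such a directive for the hypothetical MovieReel application might look like the following; the input is illustrative only:

```ts
import {Directive, input} from '@angular/core';

// Uses the application-specific "mr" prefix with a camelCase attribute selector.
@Directive({selector: '[mrTooltip]'})
export class MrTooltip {
  readonly mrTooltip = input<string>('');
}
```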
### Group Angular-specific properties before methods
Components and directives should group Angular-specific properties together, typically near the top
of the class declaration. This includes injected dependencies, inputs, outputs, and queries. Define
these and other properties before the class's methods.
This practice makes it easier to find the class's template APIs and dependencies.
### Keep components and directives focused on presentation
Code inside your components and directives should generally relate to the UI shown on the page. For
code that makes sense on its own, decoupled from the UI, prefer refactoring to other files. For
example, you can factor form validation rules or data transformations into separate functions or
classes.
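For instance, a formatting helper that doesn't depend on the UI could live in its own file and be called from the component (a sketch; the file and function names are made up):
```ts
// show-time-format.ts: presentation-independent logic lives outside the component.
export function formatShowTime(showTime: Date): string {
  return showTime.toLocaleTimeString([], {hour: 'numeric', minute: '2-digit'});
}

// show-time.ts
import {Component, computed, input} from '@angular/core';
import {formatShowTime} from './show-time-format';

@Component({
  selector: 'show-time',
  template: '<span>{{ label() }}</span>',
})
export class ShowTime {
  readonly showTime = input.required<Date>();
  protected label = computed(() => formatShowTime(this.showTime()));
}
```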
### Avoid overly complex logic in templates
Angular templates are designed to
accommodate [JavaScript-like expressions](guide/templates/expression-syntax).
You should take advantage of these expressions to capture relatively straightforward logic directly
in template expressions.
When the code in a template gets too complex, though, refactor logic into the TypeScript code (typically with a [computed](guide/signals#computed-signals)).
There's no one hard-and-fast rule that determines what constitutes "complex". Use your best
judgement.
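For example, instead of composing a long expression in the template, you might compute the value in the class (a sketch using hypothetical ticket data):
```ts
import {Component, computed, input} from '@angular/core';

@Component({
  selector: 'ticket-summary',
  // The template expression stays trivially simple.
  template: '<p>{{ summary() }}</p>',
})
export class TicketSummary {
  readonly ticketCount = input(0);
  readonly pricePerTicket = input(0);

  protected summary = computed(
    () => `${this.ticketCount()} tickets, $${(this.ticketCount() * this.pricePerTicket()).toFixed(2)} total`,
  );
}
```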
### Use `protected` on class members that are only used by a component's template
A component class's public members intrinsically define a public API that's accessible via
dependency injection and [queries](guide/components/queries). Prefer `protected`
access for any members that are meant to be read from the component's template.
```ts
@Component({
...,
template: `<p>{{ fullName() }}</p>`,
})
export class UserProfile {
firstName = input();
lastName = input();
// `fullName` is not part of the component's public API, but is used in the template.
protected fullName = computed(() => `${this.firstName()} ${this.lastName()}`);
}
```
### Use `readonly` for properties that shouldn't change
Mark component and directive properties initialized by Angular as `readonly`. This includes
properties initialized by `input`, `model`, `output`, and queries. The readonly access modifier
ensures that the value set by Angular is not overwritten.
```ts
@Component({
/*...*/
})
export class UserProfile {
readonly userId = input();
readonly userSaved = output();
readonly userName = model();
}
```
For components and directives that use the decorator-based `@Input`, `@Output`, and query APIs, this
advice applies to output properties and queries, but not input properties.
```ts
@Component({
/*...*/
})
export class UserProfile {
@Output() readonly userSaved = new EventEmitter<void>();
@ViewChildren(PaymentMethod) readonly paymentMethods?: QueryList<PaymentMethod>;
}
```
### Prefer `class` and `style` over `ngClass` and `ngStyle`
Prefer `class` and `style` bindings over using the [`NgClass`](/api/common/NgClass) and [`NgStyle`](/api/common/NgStyle) directives.
```html {prefer}
<div [class.admin]="isAdmin" [class.dense]="density === 'high'"></div>
<div [style.color]="textColor" [style.background-color]="backgroundColor"></div>
<!-- OR -->
<div [class]="{admin: isAdmin, dense: density === 'high'}"></div>
<div [style]="{'color': textColor, 'background-color': backgroundColor}"></div>
```
```html {avoid}
<div [ngClass]="{admin: isAdmin, dense: density === 'high'}"></div>
<div [ngStyle]="{'color': textColor, 'background-color': backgroundColor}"></div>
```
Both `class` and `style` bindings use a more straightforward syntax that aligns closely with
standard HTML attributes. This makes your templates easier to read and understand, especially for
developers familiar with basic HTML.
Additionally, the `NgClass` and `NgStyle` directives incur an additional performance cost compared
to the built-in `class` and `style` binding syntax.
For more details, refer to the [bindings guide](/guide/templates/binding#css-class-and-style-property-bindings).
### Name event handlers for what they _do_, not for the triggering event
Prefer naming event handlers for the action they perform rather than for the triggering event:
```html {prefer}
<button (click)="saveUserData()">Save</button>
```
```html {avoid}
<button (click)="handleClick()">Save</button>
```
Using meaningful names like this makes it easier to tell what an event does from reading the
template.
For keyboard events, you can use Angular's key event modifiers with specific handler names:
```html
<textarea (keydown.control.enter)="commitNotes()" (keydown.control.space)="showSuggestions()">
```
Sometimes, event handling logic is especially long or complex, making it impractical to declare a
single well-named handler. In these cases, it's fine to fall back to a name like `handleKeydown` and
then delegate to more specific behaviors based on the event details:
```ts
@Component({
/*...*/
})
class RichText {
handleKeydown(event: KeyboardEvent) {
if (event.ctrlKey) {
if (event.key === 'B') {
this.activateBold();
} else if (event.key === 'I') {
this.activateItalic();
}
// ...
}
}
}
```
### Keep lifecycle methods simple
Avoid putting long or complex logic inside lifecycle hooks like `ngOnInit`. Instead, prefer creating
well-named methods to contain that logic and then _call those methods_ in your lifecycle hooks.
Lifecycle hook names describe _when_ they run, meaning that the code inside doesn't have a
meaningful name that describes what the code inside is doing.
```ts {prefer}
ngOnInit() {
this.startLogging();
this.runBackgroundTask();
}
```
```ts {avoid}
ngOnInit() {
this.logger.setMode('info');
this.logger.monitorErrors();
// ...and all the rest of the code that would be unrolled from these methods.
}
```
### Use lifecycle hook interfaces
Angular provides a TypeScript interface for each lifecycle method. When adding a lifecycle hook to
your class, import and `implement` these interfaces to ensure that the methods are named correctly.
```ts
import {Component, OnInit} from '@angular/core';
@Component({
/*...*/
})
export class UserProfile implements OnInit {
// The `OnInit` interface ensures this method is named correctly.
ngOnInit() {
/* ... */
}
}
```
|
unknown
|
github
|
https://github.com/angular/angular
|
adev/src/content/best-practices/style-guide.md
|
import { test } from '../../test';
export default test({
mode: ['client', 'server'],
compileOptions: {
dev: true
},
get props() {
return { tag: true };
},
error:
'svelte_element_invalid_this_value\n' +
'The `this` prop on `<svelte:element>` must be a string, if defined'
});
|
javascript
|
github
|
https://github.com/sveltejs/svelte
|
packages/svelte/tests/runtime-legacy/samples/dynamic-element-invalid-this-content/_config.js
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import collections
# face_interfaces is referenced from specification in this module.
from grpc.framework.common import cardinality
from grpc.framework.face import interfaces as face_interfaces # pylint: disable=unused-import
from grpc.framework.face import utilities as face_utilities
from grpc.framework.alpha import _reexport
from grpc.framework.alpha import interfaces
def _qualified_name(service_name, method_name):
return '/%s/%s' % (service_name, method_name)
# TODO(nathaniel): This structure is getting bloated; it could be shrunk if
# implementations._Stub used a generic rather than a dynamic underlying
# face-layer stub.
class InvocationBreakdown(object):
"""An intermediate representation of invocation-side views of RPC methods.
Attributes:
cardinalities: A dictionary from RPC method name to interfaces.Cardinality
value.
qualified_names: A dictionary from unqualified RPC method name to
service-qualified RPC method name.
    face_cardinalities: A dictionary from service-qualified RPC method name to
      cardinality.Cardinality value.
request_serializers: A dictionary from service-qualified RPC method name to
callable behavior to be used serializing request values for the RPC.
response_deserializers: A dictionary from service-qualified RPC method name
to callable behavior to be used deserializing response values for the
RPC.
"""
__metaclass__ = abc.ABCMeta
class _EasyInvocationBreakdown(
InvocationBreakdown,
collections.namedtuple(
'_EasyInvocationBreakdown',
('cardinalities', 'qualified_names', 'face_cardinalities',
'request_serializers', 'response_deserializers'))):
pass
class ServiceBreakdown(object):
"""An intermediate representation of service-side views of RPC methods.
Attributes:
implementations: A dictionary from service-qualified RPC method name to
face_interfaces.MethodImplementation implementing the RPC method.
request_deserializers: A dictionary from service-qualified RPC method name
to callable behavior to be used deserializing request values for the RPC.
response_serializers: A dictionary from service-qualified RPC method name
to callable behavior to be used serializing response values for the RPC.
"""
__metaclass__ = abc.ABCMeta
class _EasyServiceBreakdown(
ServiceBreakdown,
collections.namedtuple(
'_EasyServiceBreakdown',
('implementations', 'request_deserializers', 'response_serializers'))):
pass
def break_down_invocation(service_name, method_descriptions):
"""Derives an InvocationBreakdown from several RPC method descriptions.
Args:
service_name: The package-qualified full name of the service.
method_descriptions: A dictionary from RPC method name to
interfaces.RpcMethodInvocationDescription describing the RPCs.
Returns:
An InvocationBreakdown corresponding to the given method descriptions.
"""
cardinalities = {}
qualified_names = {}
face_cardinalities = {}
request_serializers = {}
response_deserializers = {}
for name, method_description in method_descriptions.iteritems():
qualified_name = _qualified_name(service_name, name)
method_cardinality = method_description.cardinality()
cardinalities[name] = method_description.cardinality()
qualified_names[name] = qualified_name
face_cardinalities[qualified_name] = _reexport.common_cardinality(
method_cardinality)
request_serializers[qualified_name] = method_description.serialize_request
response_deserializers[qualified_name] = (
method_description.deserialize_response)
return _EasyInvocationBreakdown(
cardinalities, qualified_names, face_cardinalities, request_serializers,
response_deserializers)
def break_down_service(service_name, method_descriptions):
"""Derives a ServiceBreakdown from several RPC method descriptions.
  Args:
    service_name: The package-qualified full name of the service.
    method_descriptions: A dictionary from RPC method name to
      interfaces.RpcMethodServiceDescription describing the RPCs.
Returns:
A ServiceBreakdown corresponding to the given method descriptions.
"""
implementations = {}
request_deserializers = {}
response_serializers = {}
for name, method_description in method_descriptions.iteritems():
qualified_name = _qualified_name(service_name, name)
method_cardinality = method_description.cardinality()
if method_cardinality is interfaces.Cardinality.UNARY_UNARY:
def service(
request, face_rpc_context,
service_behavior=method_description.service_unary_unary):
return service_behavior(
request, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.unary_unary_inline(
service)
elif method_cardinality is interfaces.Cardinality.UNARY_STREAM:
def service(
request, face_rpc_context,
service_behavior=method_description.service_unary_stream):
return service_behavior(
request, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.unary_stream_inline(
service)
elif method_cardinality is interfaces.Cardinality.STREAM_UNARY:
def service(
request_iterator, face_rpc_context,
service_behavior=method_description.service_stream_unary):
return service_behavior(
request_iterator, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.stream_unary_inline(
service)
elif method_cardinality is interfaces.Cardinality.STREAM_STREAM:
def service(
request_iterator, face_rpc_context,
service_behavior=method_description.service_stream_stream):
return service_behavior(
request_iterator, _reexport.rpc_context(face_rpc_context))
implementations[qualified_name] = face_utilities.stream_stream_inline(
service)
request_deserializers[qualified_name] = (
method_description.deserialize_request)
response_serializers[qualified_name] = (
method_description.serialize_response)
return _EasyServiceBreakdown(
implementations, request_deserializers, response_serializers)
|
unknown
|
codeparrot/codeparrot-clean
| ||
package dockerfile
import (
"bytes"
"context"
"os"
"path/filepath"
"strings"
"github.com/containerd/platforms"
"github.com/moby/moby/api/types/container"
"github.com/moby/moby/api/types/jsonstream"
"github.com/moby/moby/api/types/mount"
"github.com/moby/moby/v2/errdefs"
"github.com/moby/sys/user"
"golang.org/x/sys/windows"
)
// seTakeOwnershipPrivilege is "SE_TAKE_OWNERSHIP_NAME" in the win32 API.
//
// see https://learn.microsoft.com/en-us/windows/win32/secauthz/privilege-constants
const seTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
// Constants for well-known SIDs in the Windows container.
// These are currently undocumented.
// See https://github.com/moby/buildkit/pull/5791#discussion_r1976652227 for more information.
const (
containerAdministratorSidString = "S-1-5-93-2-1" // ContainerAdministrator
containerUserSidString = "S-1-5-93-2-2" // ContainerUser
)
func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState, chown, ctrRootPath string, identityMapping user.IdentityMapping) (identity, error) {
if builder.options.Platform == "windows" {
return getAccountIdentity(ctx, builder, chown, ctrRootPath, state)
}
uid, gid := identityMapping.RootPair()
return identity{UID: uid, GID: gid}, nil
}
func getAccountIdentity(ctx context.Context, builder *Builder, accountName string, ctrRootPath string, state *dispatchState) (identity, error) {
// If this is potentially a string SID then attempt to convert it to verify
// this, otherwise continue looking for the account.
if strings.HasPrefix(accountName, "S-") || strings.HasPrefix(accountName, "s-") {
sid, err := windows.StringToSid(accountName)
if err == nil {
return identity{SID: sid.String()}, nil
}
}
// Attempt to obtain the SID using the name.
sid, _, accType, err := windows.LookupSID("", accountName)
// If this is a SID that is built-in and hence the same across all systems then use that.
if err == nil && (accType == windows.SidTypeAlias || accType == windows.SidTypeWellKnownGroup) {
return identity{SID: sid.String()}, nil
}
// Check if the account name is one unique to containers.
if strings.EqualFold(accountName, "ContainerAdministrator") {
return identity{SID: containerAdministratorSidString}, nil
} else if strings.EqualFold(accountName, "ContainerUser") {
return identity{SID: containerUserSidString}, nil
}
// All other lookups failed, so therefore determine if the account in
// question exists in the container and if so, obtain its SID.
return lookupNTAccount(ctx, builder, accountName, state)
}
func lookupNTAccount(ctx context.Context, builder *Builder, accountName string, state *dispatchState) (identity, error) {
source, _ := filepath.Split(os.Args[0])
target := "C:\\Docker"
targetExecutable := target + "\\containerutility.exe"
optionsPlatform, err := platforms.Parse(builder.options.Platform)
if err != nil {
return identity{}, errdefs.InvalidParameter(err)
}
runConfig := copyRunConfig(state.runConfig,
withCmdCommentString("internal run to obtain NT account information.", optionsPlatform.OS))
runConfig.Cmd = []string{targetExecutable, "getaccountsid", accountName}
hostConfig := &container.HostConfig{
Mounts: []mount.Mount{
{
Type: mount.TypeBind,
Source: source,
Target: target,
ReadOnly: true,
},
},
}
container, err := builder.containerManager.Create(ctx, runConfig, hostConfig)
if err != nil {
return identity{}, err
}
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
if err := builder.containerManager.Run(ctx, container.ID, stdout, stderr); err != nil {
if err, ok := err.(*statusCodeError); ok {
return identity{}, &jsonstream.Error{
Message: stderr.String(),
Code: err.StatusCode(),
}
}
return identity{}, err
}
accountSid := stdout.String()
return identity{SID: accountSid}, nil
}
|
go
|
github
|
https://github.com/moby/moby
|
daemon/builder/dockerfile/internals_windows.go
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package sql_test
import (
"context"
"fmt"
"sort"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/sqltestutils"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestShowRangesWithLocality(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const numNodes = 3
ctx := context.Background()
tc := testcluster.StartTestCluster(t, numNodes, base.TestClusterArgs{})
defer tc.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
sqlDB.Exec(t, `CREATE TABLE t (x INT PRIMARY KEY)`)
sqlDB.Exec(t, `ALTER TABLE t SPLIT AT SELECT i FROM generate_series(0, 20) AS g(i)`)
const (
leaseHolderIdx = iota
leaseHolderLocalityIdx
replicasColIdx
localitiesColIdx
votingReplicasIdx
nonVotingReplicasIdx
)
replicas := make([]int, 3)
// TestClusters get some localities by default.
q := `SELECT lease_holder, lease_holder_locality, replicas, replica_localities, voting_replicas, non_voting_replicas
FROM [SHOW RANGES FROM TABLE t WITH DETAILS]`
result := sqlDB.QueryStr(t, q)
for _, row := range result {
// Verify the leaseholder localities.
leaseHolder := row[leaseHolderIdx]
leaseHolderLocalityExpected := fmt.Sprintf(`region=test,dc=dc%s`, leaseHolder)
require.Equal(t, leaseHolderLocalityExpected, row[leaseHolderLocalityIdx])
// Verify the replica localities.
_, err := fmt.Sscanf(row[replicasColIdx], "{%d,%d,%d}", &replicas[0], &replicas[1], &replicas[2])
require.NoError(t, err)
votingReplicas := sqltestutils.ArrayStringToSlice(t, row[votingReplicasIdx])
sort.Strings(votingReplicas)
require.Equal(t, []string{"1", "2", "3"}, votingReplicas)
nonVotingReplicas := sqltestutils.ArrayStringToSlice(t, row[nonVotingReplicasIdx])
require.Equal(t, []string{}, nonVotingReplicas)
var builder strings.Builder
builder.WriteString("{")
for i, replica := range replicas {
builder.WriteString(fmt.Sprintf(`"region=test,dc=dc%d"`, replica))
if i != len(replicas)-1 {
builder.WriteString(",")
}
}
builder.WriteString("}")
expected := builder.String()
require.Equal(t, expected, row[localitiesColIdx])
}
}
// TestShowRangesMultipleStores tests that the leaseholder_locality shown in
// SHOW RANGES works correctly.
func TestShowRangesMultipleStores(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderRace(t, "the test is too heavy")
ctx := context.Background()
// NodeID=1, StoreID=1,2
tc := testcluster.StartTestCluster(t, 1,
base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Locality: roachpb.Locality{Tiers: []roachpb.Tier{{Key: "node", Value: "1"}}},
StoreSpecs: []base.StoreSpec{base.DefaultTestStoreSpec, base.DefaultTestStoreSpec},
},
ReplicationMode: base.ReplicationAuto,
},
)
defer tc.Stopper().Stop(ctx)
// NodeID=2, StoreID=3,4
tc.AddAndStartServer(t,
base.TestServerArgs{
Locality: roachpb.Locality{Tiers: []roachpb.Tier{{Key: "node", Value: "2"}}},
StoreSpecs: []base.StoreSpec{base.DefaultTestStoreSpec, base.DefaultTestStoreSpec},
},
)
// NodeID=3, StoreID=5,6
tc.AddAndStartServer(t,
base.TestServerArgs{
Locality: roachpb.Locality{Tiers: []roachpb.Tier{{Key: "node", Value: "3"}}},
StoreSpecs: []base.StoreSpec{base.DefaultTestStoreSpec, base.DefaultTestStoreSpec},
},
)
assert.NoError(t, tc.WaitForFullReplication())
// Scatter a system table so that the lease is unlikely to be on node 1.
sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
sqlDB.Exec(t, "ALTER TABLE system.jobs SCATTER")
// Ensure that the localities line up.
for _, q := range []string{
"SHOW RANGES FROM DATABASE system WITH DETAILS",
"SHOW RANGES FROM TABLE system.jobs WITH DETAILS",
"SHOW RANGES FROM INDEX system.jobs@jobs_status_created_idx WITH DETAILS",
"SHOW RANGE FROM TABLE system.jobs FOR ROW (0)",
"SHOW RANGE FROM INDEX system.jobs@jobs_status_created_idx FOR ROW ('running', now(), 0)",
} {
t.Run(q, func(t *testing.T) {
// Retry because if there's not a leaseholder, you can get a NULL.
sqlDB.CheckQueryResultsRetry(t,
fmt.Sprintf(`
SELECT DISTINCT
(
array_position(replica_localities, lease_holder_locality)
= array_position(replicas, lease_holder)
)
FROM [%s]`, q), [][]string{{"true"}})
})
}
}
func TestShowRangesWithDetails(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderRace(t, "the test is too heavy")
ctx := context.Background()
tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{})
defer tc.Stopper().Stop(ctx)
// Disable automatic load-based splits to have full control over range
// boundaries during the test.
systemDB := sqlutils.MakeSQLRunner(tc.SystemLayer(0).SQLConn(t))
systemDB.Exec(t, `SET CLUSTER SETTING kv.range_split.by_load_enabled = false`)
sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
sqlDB.Exec(t, "CREATE DATABASE test")
sqlDB.Exec(t, "USE test")
sqlDB.Exec(t, `
CREATE TABLE users (
id INTEGER PRIMARY KEY,
name STRING
)
`)
// Assert the required keys are present.
res := sqlDB.Query(t, `
SELECT
span_stats->'approximate_disk_bytes',
span_stats->'key_count',
span_stats->'key_bytes',
span_stats->'val_count',
span_stats->'val_bytes',
span_stats->'sys_count',
span_stats->'sys_bytes',
span_stats->'live_count',
span_stats->'live_bytes',
span_stats->'intent_count',
span_stats->'intent_bytes'
FROM [SHOW RANGES FROM DATABASE test WITH DETAILS]`)
res.Next()
vals := make([]interface{}, 11)
for i := range vals {
vals[i] = new(interface{})
}
err := res.Scan(vals...)
// Every key should be present, and the scan should be successful.
require.NoError(t, err)
// This invocation of SHOW RANGES should have only returned a single row.
require.Equal(t, false, res.NextResultSet())
// Assert the counterpoint: Scan should return an error for a key that
// does not exist.
badQuery := sqlDB.Query(t, `
SELECT span_stats->'key_does_not_exist'
FROM [SHOW RANGES FROM DATABASE test WITH DETAILS]`)
badQuery.Next()
var keyDoesNotExistVal int
err = badQuery.Scan(&keyDoesNotExistVal)
require.Error(t, err)
// Now, let's add some users, and query the table's val_bytes.
sqlDB.Exec(t, "INSERT INTO test.users (id, name) VALUES (1, 'ab'), (2, 'cd')")
isSystemTenant := tc.ApplicationLayer(0).Codec().ForSystemTenant()
var valBytesPreSplit int
if isSystemTenant {
valBytesPreSplitRes := sqlDB.QueryRow(t, `
SELECT span_stats->'val_bytes'
FROM [SHOW RANGES FROM TABLE test.users WITH DETAILS]`,
)
valBytesPreSplitRes.Scan(&valBytesPreSplit)
}
// Split the table at the second row, so it occupies a second range.
sqlDB.Exec(t, `ALTER TABLE test.users SPLIT AT VALUES (2)`)
testutils.SucceedsSoon(t, func() error {
afterSplit := sqlDB.Query(t, `
SELECT span_stats->'val_bytes'
FROM [SHOW RANGES FROM TABLE test.users WITH DETAILS]
`)
defer afterSplit.Close()
var valBytesR1 int
var valBytesR2 int
afterSplit.Next()
err = afterSplit.Scan(&valBytesR1)
if err != nil {
return err
}
afterSplit.Next()
err = afterSplit.Scan(&valBytesR2)
if err != nil {
return err
}
// For system tenant, verify the sum of parts equals the whole.
// For secondary tenants, we skip this check because span stats accounting
// works differently with tenant-prefixed keys.
if isSystemTenant && valBytesPreSplit != valBytesR1+valBytesR2 {
return errors.Newf("expected %d to equal %d + %d", valBytesPreSplit, valBytesR1, valBytesR2)
}
return nil
})
}
// TestShowRangesUnavailableReplicas tests that SHOW RANGES does not return an
// error if it encounters an unavailable range. Moreover, crdb_internal.ranges
// includes the encountered error.
func TestShowRangesUnavailableReplicas(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const numNodes = 3
ctx := context.Background()
// This test requires system tenant because it controls server lifecycle
// (stopping servers to create unavailable ranges) and uses manual
// replication mode - both are KV-layer infrastructure operations.
tc := testcluster.StartTestCluster(
t, numNodes, base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgs: base.TestServerArgs{
DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,
},
},
)
defer tc.Stopper().Stop(ctx)
systemDB := sqlutils.MakeSQLRunner(tc.SystemLayer(0).SQLConn(t))
systemDB.Exec(t, `SET CLUSTER SETTING kv.replica_circuit_breaker.slow_replication_threshold='1s'`)
systemDB.Exec(t, `SET CLUSTER SETTING kv.replica_raft.leaderless_unavailable_threshold='5s'`)
sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
sqlDB.Exec(t, `CREATE TABLE t (x INT PRIMARY KEY)`)
// Split the table's range to have a better chance of moving some leaseholders
// off of node 1 in the scatter below.
sqlDB.Exec(t, `ALTER TABLE t SPLIT AT SELECT i FROM generate_series(0, 20) AS g(i)`)
sqlDB.Exec(t, `ALTER TABLE t SCATTER`)
// Server 0 includes the leaseholders for all system ranges, but the other two
// are safe to stop to create some unavailable ranges that belong to table t.
tc.StopServer(1)
tc.StopServer(2)
q := `SELECT range_id, lease_holder, range_size FROM [SHOW RANGES FROM TABLE t WITH DETAILS]`
result := sqlDB.QueryStr(t, q)
unavailableRangeID := ""
// Iterate over the results to find an unavailable range.
for _, row := range result {
// crdb_internal.ranges powers the lease_holder and range_size fields in
// SHOW RANGES. If a range is unavailable, the former returns NULL for both
// fields but the latter converts the NULL leaseholder to 0.
if row[1] == "0" {
unavailableRangeID = row[0]
require.Equal(t, "NULL", row[2])
break
}
}
	// Ensure there is at least one unavailable range.
require.NotEqual(t, "", unavailableRangeID)
// crdb_internal.ranges also has an "errors" field that includes any errors
// encountered while fetching the leaseholder and range stats. For the
// unavailable range, we expect a "replica unavailable" error.
q = fmt.Sprintf(`SELECT errors FROM crdb_internal.ranges WHERE range_id = %s`, unavailableRangeID)
result = sqlDB.QueryStr(t, q)
expectedError := fmt.Sprintf(
"replica unavailable.*unable to serve request to r%s", unavailableRangeID,
)
require.Regexp(t, expectedError, result[0][0])
}
|
go
|
github
|
https://github.com/cockroachdb/cockroach
|
pkg/sql/show_ranges_test.go
|
"""
=================================================
Orthogonal distance regression (:mod:`scipy.odr`)
=================================================
.. currentmodule:: scipy.odr
Package Content
===============
.. autosummary::
:toctree: generated/
Data -- The data to fit.
RealData -- Data with weights as actual std. dev.s and/or covariances.
Model -- Stores information about the function to be fit.
ODR -- Gathers all info & manages the main fitting routine.
Output -- Result from the fit.
odr -- Low-level function for ODR.
odr_error -- Error exception.
odr_stop -- Stop exception.
Prebuilt models:
.. autosummary::
:toctree: generated/
polynomial
.. data:: exponential
.. data:: multilinear
.. data:: unilinear
.. data:: quadratic
.. data:: polynomial
Usage information
=================
Introduction
------------
Why Orthogonal Distance Regression (ODR)? Sometimes one has
measurement errors in the explanatory (a.k.a., "independent")
variable(s), not just the response (a.k.a., "dependent") variable(s).
Ordinary Least Squares (OLS) fitting procedures treat the data for
explanatory variables as fixed, i.e., not subject to error of any kind.
Furthermore, OLS procedures require that the response variables be an
explicit function of the explanatory variables; sometimes making the
equation explicit is impractical and/or introduces errors. ODR can
handle both of these cases with ease, and can even reduce to the OLS
case if that is sufficient for the problem.
ODRPACK is a FORTRAN-77 library for performing ODR with possibly
non-linear fitting functions. It uses a modified trust-region
Levenberg-Marquardt-type algorithm [1]_ to estimate the function
parameters. The fitting functions are provided by Python functions
operating on NumPy arrays. The required derivatives may be provided
by Python functions as well, or may be estimated numerically. ODRPACK
can do explicit or implicit ODR fits, or it can do OLS. Input and
output variables may be multi-dimensional. Weights can be provided to
account for different variances of the observations, and even
covariances between dimensions of the variables.
The `scipy.odr` package offers an object-oriented interface to
ODRPACK, in addition to the low-level `odr` function.
Additional background information about ODRPACK can be found in the
`ODRPACK User's Guide
<http://docs.scipy.org/doc/external/odrpack_guide.pdf>`_, reading
which is recommended.
Basic usage
-----------
1. Define the function you want to fit against.::
def f(B, x):
'''Linear function y = m*x + b'''
# B is a vector of the parameters.
# x is an array of the current x values.
# x is in the same format as the x passed to Data or RealData.
#
# Return an array in the same format as y passed to Data or RealData.
return B[0]*x + B[1]
2. Create a Model.::
linear = Model(f)
3. Create a Data or RealData instance.::
mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2))
or, when the actual covariances are known::
mydata = RealData(x, y, sx=sx, sy=sy)
4. Instantiate ODR with your data, model and initial parameter estimate.::
myodr = ODR(mydata, linear, beta0=[1., 2.])
5. Run the fit.::
myoutput = myodr.run()
6. Examine output.::
myoutput.pprint()
References
----------
.. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression,"
in "Statistical analysis of measurement error models and
applications: proceedings of the AMS-IMS-SIAM joint summer research
conference held June 10-16, 1989," Contemporary Mathematics,
vol. 112, pg. 186, 1990.
"""
# version: 0.7
# author: Robert Kern <robert.kern@gmail.com>
# date: 2006-09-21
from __future__ import division, print_function, absolute_import
from .odrpack import *
from .models import *
from . import add_newdocs
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
|
unknown
|
codeparrot/codeparrot-clean
| ||
<li [class.completed]="todo().completed">
<div class="view" appTooltip>
<input class="toggle" type="checkbox" [checked]="todo().completed" (change)="toggle()" />
<label (dblclick)="enableEditMode()" [style.display]="editMode ? 'none' : 'block'">
{{ todo().label }}
</label>
<button class="destroy" (click)="delete.emit(todo())"></button>
</div>
<input
class="edit"
[value]="todo().label"
[style.display]="editMode ? 'block' : 'none'"
(keydown.enter)="completeEdit($any($event.target).value)"
/>
</li>
|
html
|
github
|
https://github.com/angular/angular
|
devtools/src/app/demo-app/todo/home/todo.component.html
|
# ---------------------------------------------------------------------------- #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ---------------------------------------------------------------------------- #
from chareditor import CharEditorScreen
from Title import TitleScreen
from game import GameScreen
from game import GameSetupScreen
# Constant screen names
TITLE = 'title'
CHAR_EDITOR = 'character editor'
GAME = 'game'
GAME_SETUP = 'game setup'
CONFIG = 'config'
__all__ = ["TITLE", "CHAR_EDITOR", "GAME", "GAME_SETUP", "CONFIG", "screens"]
screens = {
    TITLE: TitleScreen,
    CHAR_EDITOR: CharEditorScreen,
    GAME: GameScreen,
    GAME_SETUP: GameSetupScreen,
    #CONFIG: ConfigScreen,
    #BESTIARY: BestiaryScreen,
    #CHAR_VIEWER: CharViewerScreen,
}
class ScreenNav:
"""Handles the navagation between 'screen objects'."""
def __init__(self):
self._last = None
self._current = None
@property
def current(self):
"""The current screen."""
return self._current
@property
def last(self):
"""The last screen."""
return self._last
    def can_go_back(self):
        """True if can return to the previous screen."""
        return (self._last is not None and self._current._can_go_back)
# ---- Handlers ------------------------------------------------------ #
def go_back(self):
"""Go back to the previous screen, if the current screen permits it."""
if self.can_go_back():
self._change_screen(self._last, self._current._cleanup_on_go_back)
    def _goto(self, screen):
        """Go to the given screen."""
        if screen != self._current:
            if screen == self._last:
                # Returning to the previous screen is just a go_back.
                self.go_back()
                return
            if screen in screens:
                if isinstance(screen, str): screen = screens[screen]
                if isinstance(screen, BDScreen):
                    self._change_screen(screen, self._current._cleanup_on_goto)
            else:
                raise ValueError("invalid screen: {}".format(screen))
    def exit_to_title(self):
        """Exit from the current screen and go back to the title screen."""
        if self._current.name != TITLE:
            self._current.exit_to_title()
    def quit_to_title(self):
        self._current.quit()
    def _change_screen(self, n, cleanup):
        # helper for go_back and goto
        if cleanup: # kill the current screen
            self._current.end()
            x = self._current
            self._current = n
            self._last = None
            x.cleanup()
        else: # keep both screens alive
            self._last = self._current
            self._current = n
class BDScreen:
"""Base class for Beyond Dreams "Screen" Objects.
This defines what will be displayed when
'session.screen' = a given screen object.
"""
_name = "" # Name must match key in 'screens'
def __init__(self):
# Bool States
self._running = False
self._can_go_back = False
self._cleanup_on_go_back = True
self._cleanup_on_goto = True
# eq, ne -- test 'x is self', then x 'isinstance of' and so on
    def __eq__(self, x):
        if x is self:
            return True
        if isinstance(x, str):
            return x == self._name
        if isinstance(x, BDScreen):
            return x._name == self._name
        raise TypeError("cannot compare type '{}' to BDScreen type.".format(
            type(x)))
    def __ne__(self, x):
        return not self.__eq__(x)
@property
def name(self):
"""The name of this screen."""
return self._name
def is_running(self):
"""True if this screen is currently running."""
return self._running
    def start(self):
        """Start this screen."""
        # 'session' is assumed to be provided elsewhere (see the class docstring).
        if session._screen != self:
            try: self.pre_run()
            except NotImplementedError: pass
            session._screen = self
            self.run()
# Optional
def pre_run(self):
"""Called before the screen becomes active."""
raise NotImplementedError
def has_unsaved_data(self):
"""Return True if there is unsaved data."""
return False
def run(self):
raise NotImplementedError
def end(self):
"""Called to end this screen."""
pass
# Subclasses must call these
def exit_to_title(self):
"""Exit this screen and return to the title screen."""
raise NotImplementedError
def quit(self):
"""Quit the game and return to the desktop."""
raise NotImplementedError
def cleanup(self):
"""Called to kill this screen after screen transition."""
pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bridge\Twig\Tests\Extension;
use PHPUnit\Framework\Attributes\DataProvider;
use PHPUnit\Framework\MockObject\Stub;
use Symfony\Bridge\Twig\Test\FormLayoutTestCase;
use Symfony\Component\Form\Extension\Core\Type\PercentType;
use Symfony\Component\Form\Extension\Core\Type\SubmitType;
use Symfony\Component\Form\Extension\Csrf\CsrfExtension;
use Symfony\Component\Form\FormError;
use Symfony\Component\Form\FormExtensionInterface;
use Symfony\Component\Form\FormView;
use Symfony\Component\Security\Csrf\CsrfTokenManagerInterface;
use Symfony\Component\Translation\TranslatableMessage;
use Symfony\Contracts\Translation\TranslatableInterface;
use Symfony\Contracts\Translation\TranslatorInterface;
abstract class AbstractLayoutTestCase extends FormLayoutTestCase
{
protected Stub&CsrfTokenManagerInterface $csrfTokenManager;
protected array $testableFeatures = [];
private string $defaultLocale;
protected function setUp(): void
{
if (!\extension_loaded('intl')) {
$this->markTestSkipped('Extension intl is required.');
}
$this->defaultLocale = \Locale::getDefault();
\Locale::setDefault('en');
$this->csrfTokenManager = $this->createStub(CsrfTokenManagerInterface::class);
parent::setUp();
}
/**
* @return FormExtensionInterface[]
*/
protected function getExtensions(): array
{
return [
new CsrfExtension($this->csrfTokenManager),
];
}
protected function tearDown(): void
{
if (isset($this->defaultLocale)) {
\Locale::setDefault($this->defaultLocale);
}
}
protected function assertWidgetMatchesXpath(FormView $view, array $vars, $xpath)
{
// include ampersands everywhere to validate escaping
$html = $this->renderWidget($view, array_merge([
'id' => 'my&id',
'attr' => ['class' => 'my&class'],
], $vars));
if (!isset($vars['id'])) {
$xpath = trim($xpath).'
[@id="my&id"]';
}
if (!isset($vars['attr']['class'])) {
$xpath .= '
[@class="my&class"]';
}
$this->assertMatchesXpath($html, $xpath);
}
public function testLabel()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType');
$view = $form->createView();
$this->renderWidget($view, ['label' => 'foo']);
$html = $this->renderLabel($view);
$this->assertMatchesXpath($html,
'/label
[@for="name"]
[.="[trans]Name[/trans]"]
'
);
}
public function testLabelWithoutTranslation()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'translation_domain' => false,
]);
$this->assertMatchesXpath($this->renderLabel($form->createView()),
'/label
[@for="name"]
[.="Name"]
'
);
}
public function testLabelOnForm()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateType', null, ['widget' => 'choice']);
$view = $form->createView();
$this->renderWidget($view, ['label' => 'foo']);
$html = $this->renderLabel($view);
$this->assertMatchesXpath($html,
'/label
[@class="required"]
[.="[trans]Name[/trans]"]
'
);
}
public function testLabelWithCustomTextPassedAsOption()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'label' => 'Custom label',
]);
$html = $this->renderLabel($form->createView());
$this->assertMatchesXpath($html,
'/label
[@for="name"]
[.="[trans]Custom label[/trans]"]
'
);
}
public function testLabelWithCustomTextPassedDirectly()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType');
$html = $this->renderLabel($form->createView(), 'Custom label');
$this->assertMatchesXpath($html,
'/label
[@for="name"]
[.="[trans]Custom label[/trans]"]
'
);
}
public function testLabelWithCustomTextPassedAsOptionAndDirectly()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'label' => 'Custom label',
]);
$html = $this->renderLabel($form->createView(), 'Overridden label');
$this->assertMatchesXpath($html,
'/label
[@for="name"]
[.="[trans]Overridden label[/trans]"]
'
);
}
public function testLabelDoesNotRenderFieldAttributes()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType');
$html = $this->renderLabel($form->createView(), null, [
'attr' => [
'class' => 'my&class',
],
]);
$this->assertMatchesXpath($html,
'/label
[@for="name"]
[@class="required"]
'
);
}
public function testLabelWithCustomAttributesPassedDirectly()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType');
$html = $this->renderLabel($form->createView(), null, [
'label_attr' => [
'class' => 'my&class',
],
]);
$this->assertMatchesXpath($html,
'/label
[@for="name"]
[@class="my&class required"]
'
);
}
public function testLabelWithCustomTextAndCustomAttributesPassedDirectly()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType');
$html = $this->renderLabel($form->createView(), 'Custom label', [
'label_attr' => [
'class' => 'my&class',
],
]);
$this->assertMatchesXpath($html,
'/label
[@for="name"]
[@class="my&class required"]
[.="[trans]Custom label[/trans]"]
'
);
}
// https://github.com/symfony/symfony/issues/5029
public function testLabelWithCustomTextAsOptionAndCustomAttributesPassedDirectly()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'label' => 'Custom label',
]);
$html = $this->renderLabel($form->createView(), null, [
'label_attr' => [
'class' => 'my&class',
],
]);
$this->assertMatchesXpath($html,
'/label
[@for="name"]
[@class="my&class required"]
[.="[trans]Custom label[/trans]"]
'
);
}
public function testLabelFormatName()
{
$form = $this->factory->createNamedBuilder('myform')
->add('myfield', 'Symfony\Component\Form\Extension\Core\Type\TextType')
->getForm();
$view = $form->get('myfield')->createView();
$html = $this->renderLabel($view, null, ['label_format' => 'form.%name%']);
$this->assertMatchesXpath($html,
'/label
[@for="myform_myfield"]
[.="[trans]form.myfield[/trans]"]
'
);
}
public function testLabelFormatId()
{
$form = $this->factory->createNamedBuilder('myform')
->add('myfield', 'Symfony\Component\Form\Extension\Core\Type\TextType')
->getForm();
$view = $form->get('myfield')->createView();
$html = $this->renderLabel($view, null, ['label_format' => 'form.%id%']);
$this->assertMatchesXpath($html,
'/label
[@for="myform_myfield"]
[.="[trans]form.myform_myfield[/trans]"]
'
);
}
public function testLabelFormatAsFormOption()
{
$options = ['label_format' => 'form.%name%'];
$form = $this->factory->createNamedBuilder('myform', 'Symfony\Component\Form\Extension\Core\Type\FormType', null, $options)
->add('myfield', 'Symfony\Component\Form\Extension\Core\Type\TextType')
->getForm();
$view = $form->get('myfield')->createView();
$html = $this->renderLabel($view);
$this->assertMatchesXpath($html,
'/label
[@for="myform_myfield"]
[.="[trans]form.myfield[/trans]"]
'
);
}
public function testLabelFormatOverriddenOption()
{
$options = ['label_format' => 'form.%name%'];
$form = $this->factory->createNamedBuilder('myform', 'Symfony\Component\Form\Extension\Core\Type\FormType', null, $options)
->add('myfield', 'Symfony\Component\Form\Extension\Core\Type\TextType', ['label_format' => 'field.%name%'])
->getForm();
$view = $form->get('myfield')->createView();
$html = $this->renderLabel($view);
$this->assertMatchesXpath($html,
'/label
[@for="myform_myfield"]
[.="[trans]field.myfield[/trans]"]
'
);
}
public function testLabelWithoutTranslationOnButton()
{
$form = $this->factory->createNamedBuilder('myform', 'Symfony\Component\Form\Extension\Core\Type\FormType', null, [
'translation_domain' => false,
])
->add('mybutton', 'Symfony\Component\Form\Extension\Core\Type\ButtonType')
->getForm();
$view = $form->get('mybutton')->createView();
$html = $this->renderWidget($view);
$this->assertMatchesXpath($html,
'/button
[@type="button"]
[@name="myform[mybutton]"]
[.="Mybutton"]
'
);
}
public function testLabelFormatOnButton()
{
$form = $this->factory->createNamedBuilder('myform')
->add('mybutton', 'Symfony\Component\Form\Extension\Core\Type\ButtonType')
->getForm();
$view = $form->get('mybutton')->createView();
$html = $this->renderWidget($view, ['label_format' => 'form.%name%']);
$this->assertMatchesXpath($html,
'/button
[@type="button"]
[@name="myform[mybutton]"]
[.="[trans]form.mybutton[/trans]"]
'
);
}
public function testLabelFormatOnButtonId()
{
$form = $this->factory->createNamedBuilder('myform')
->add('mybutton', 'Symfony\Component\Form\Extension\Core\Type\ButtonType')
->getForm();
$view = $form->get('mybutton')->createView();
$html = $this->renderWidget($view, ['label_format' => 'form.%id%']);
$this->assertMatchesXpath($html,
'/button
[@type="button"]
[@name="myform[mybutton]"]
[.="[trans]form.myform_mybutton[/trans]"]
'
);
}
public function testHelp()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'help' => 'Help text test!',
]);
$view = $form->createView();
$html = $this->renderHelp($view);
$this->assertMatchesXpath($html,
'/*[self::div or self::p]
[@id="name_help"]
[@class="help-text"]
[.="[trans]Help text test![/trans]"]
'
);
}
public function testHelpNotSet()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType');
$view = $form->createView();
$html = $this->renderHelp($view);
$this->assertMatchesXpath($html, '/p', 0);
}
public function testHelpSetLinkFromWidget()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'help' => 'Help text test!',
]);
$view = $form->createView();
$html = $this->renderRow($view);
// Test if renderHelp method is implemented (throw SkippedTestError if not)
$this->renderHelp($view);
$this->assertMatchesXpath($html,
'//input
[@aria-describedby="name_help"]
'
);
}
public function testHelpNotSetNotLinkedFromWidget()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType');
$view = $form->createView();
$html = $this->renderRow($view);
// Test if renderHelp method is implemented (throw SkippedTestError if not)
$this->renderHelp($view);
$this->assertMatchesXpath($html,
'//input
[not(@aria-describedby)]
'
);
}
public function testErrors()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType');
$form->addError(new FormError('[trans]Error 1[/trans]'));
$form->addError(new FormError('[trans]Error 2[/trans]'));
$view = $form->createView();
$html = $this->renderErrors($view);
$this->assertMatchesXpath($html,
'/ul
[
./li[.="[trans]Error 1[/trans]"]
/following-sibling::li[.="[trans]Error 2[/trans]"]
]
[count(./li)=2]
'
);
}
public function testOverrideWidgetBlock()
{
// see custom_widgets.html.twig
$form = $this->factory->createNamed('text_id', 'Symfony\Component\Form\Extension\Core\Type\TextType');
$html = $this->renderWidget($form->createView());
$this->assertMatchesXpath($html,
'/div
[
./input
[@type="text"]
[@id="text_id"]
]
[@id="container"]
'
);
}
public function testCheckedCheckbox()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\CheckboxType', true);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="checkbox"]
[@name="name"]
[@checked="checked"]
[@value="1"]
'
);
}
public function testUncheckedCheckbox()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\CheckboxType', false);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="checkbox"]
[@name="name"]
[not(@checked)]
'
);
}
public function testCheckboxWithValue()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\CheckboxType', false, [
'value' => 'foo&bar',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="checkbox"]
[@name="name"]
[@value="foo&bar"]
'
);
}
public function testSingleChoice()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'multiple' => false,
'expanded' => false,
]);
// If the field is collapsed, has no "multiple" attribute, is required but
// has *no* empty value, the "required" must not be added, otherwise
// the resulting HTML is invalid.
// https://github.com/symfony/symfony/issues/8942
// HTML 5 spec
// http://www.w3.org/html/wg/drafts/html/master/forms.html#placeholder-label-option
// "If a select element has a required attribute specified, does not
// have a multiple attribute specified, and has a display size of 1,
// then the select element must have a placeholder label option."
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[not(@required)]
[
./option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=2]
'
);
}
public function testSelectWithSizeBiggerThanOneCanBeRequired()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', null, [
'choices' => ['a', 'b'],
'multiple' => false,
'expanded' => false,
'attr' => ['size' => 2],
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[@required="required"]
[@size="2"]
[count(./option)=2]
'
);
}
public function testSingleChoiceWithoutTranslation()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'multiple' => false,
'expanded' => false,
'choice_translation_domain' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[not(@required)]
[
./option[@value="&a"][@selected="selected"][.="Choice&A"]
/following-sibling::option[@value="&b"][not(@selected)][.="Choice&B"]
]
[count(./option)=2]
'
);
}
public function testSingleChoiceWithPlaceholderWithoutTranslation()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'multiple' => false,
'expanded' => false,
'required' => false,
'translation_domain' => false,
'placeholder' => 'Placeholder&Not&Translated',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[not(@required)]
[
./option[@value=""][not(@selected)][not(@disabled)][.="Placeholder&Not&Translated"]
/following-sibling::option[@value="&a"][@selected="selected"][.="Choice&A"]
/following-sibling::option[@value="&b"][not(@selected)][.="Choice&B"]
]
[count(./option)=3]
'
);
}
public function testSingleChoiceAttributes()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'choice_attr' => ['Choice&B' => ['class' => 'foo&bar']],
'multiple' => false,
'expanded' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[not(@required)]
[
./option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][@class="foo&bar"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=2]
'
);
}
public function testSingleChoiceAttributesWithMainAttributes()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'multiple' => false,
'expanded' => false,
'attr' => ['class' => 'bar&baz'],
]);
$this->assertWidgetMatchesXpath($form->createView(), ['attr' => ['class' => 'bar&baz']],
'/select
[@name="name"]
[@class="bar&baz"]
[not(@required)]
[
./option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"][not(@id)][not(@name)]
/following-sibling::option[@value="&b"][not(@class)][not(@selected)][.="[trans]Choice&B[/trans]"][not(@id)][not(@name)]
]
[count(./option)=2]
'
);
}
public function testSingleExpandedChoiceAttributesWithMainAttributes()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'multiple' => false,
'expanded' => true,
'attr' => ['class' => 'bar&baz'],
]);
$this->assertWidgetMatchesXpath($form->createView(), ['attr' => ['class' => 'bar&baz']],
'/div
[@class="bar&baz"]
[
./input[@type="radio"][@name="name"][@id="name_0"][@value="&a"][@checked]
/following-sibling::label[@for="name_0"][.="[trans]Choice&A[/trans]"]
/following-sibling::input[@type="radio"][@name="name"][@id="name_1"][@value="&b"][not(@checked)]
/following-sibling::label[@for="name_1"][.="[trans]Choice&B[/trans]"]
/following-sibling::input[@type="hidden"][@id="name__token"]
]
[count(./input)=3]
'
);
}
public function testSingleChoiceWithPreferred()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'preferred_choices' => ['&b'],
'multiple' => false,
'expanded' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), ['separator' => '-- sep --'],
'/select
[@name="name"]
[not(@required)]
[
./option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
/following-sibling::option[@disabled="disabled"][not(@selected)][.="-- sep --"]
/following-sibling::option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][.="[trans]Choice&B[/trans]"]
]
[count(./option)=4]
'
);
}
public function testSingleChoiceWithPreferredAndNoSeparator()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'preferred_choices' => ['&b'],
'multiple' => false,
'expanded' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), ['separator' => null],
'/select
[@name="name"]
[not(@required)]
[
./option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
/following-sibling::option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][.="[trans]Choice&B[/trans]"]
]
[count(./option)=3]
'
);
}
public function testSingleChoiceWithPreferredAndBlankSeparator()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'preferred_choices' => ['&b'],
'multiple' => false,
'expanded' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), ['separator' => ''],
'/select
[@name="name"]
[not(@required)]
[
./option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
/following-sibling::option[@disabled="disabled"][not(@selected)][.=""]
/following-sibling::option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][.="[trans]Choice&B[/trans]"]
]
[count(./option)=4]
'
);
}
public function testChoiceWithOnlyPreferred()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'preferred_choices' => ['&a', '&b'],
'multiple' => false,
'expanded' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[count(./option)=5]
'
);
}
public function testSingleChoiceNonRequired()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'required' => false,
'multiple' => false,
'expanded' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[not(@required)]
[
./option[@value=""][.=""]
/following-sibling::option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=3]
'
);
}
public function testSingleChoiceNonRequiredNoneSelected()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', null, [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'required' => false,
'multiple' => false,
'expanded' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[not(@required)]
[
./option[@value=""][.=""]
/following-sibling::option[@value="&a"][not(@selected)][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=3]
'
);
}
public function testSingleChoiceNonRequiredWithPlaceholder()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'multiple' => false,
'expanded' => false,
'required' => false,
'placeholder' => 'Select&Anything&Not&Me',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[not(@required)]
[
./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Select&Anything&Not&Me[/trans]"]
/following-sibling::option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=3]
'
);
}
public function testSingleChoiceRequiredWithPlaceholder()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'required' => true,
'multiple' => false,
'expanded' => false,
'placeholder' => 'Test&Me',
]);
// The "disabled" attribute was removed again due to a bug in the
// BlackBerry 10 browser.
// See https://github.com/symfony/symfony/pull/7678
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[@required="required"]
[
./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Test&Me[/trans]"]
/following-sibling::option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=3]
'
);
}
public function testSingleChoiceRequiredWithPlaceholderViaView()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'required' => true,
'multiple' => false,
'expanded' => false,
]);
// The "disabled" attribute was removed again due to a bug in the
// BlackBerry 10 browser.
// See https://github.com/symfony/symfony/pull/7678
$this->assertWidgetMatchesXpath($form->createView(), ['placeholder' => ''],
'/select
[@name="name"]
[@required="required"]
[
./option[@value=""][not(@selected)][not(@disabled)][.=""]
/following-sibling::option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=3]
'
);
}
public function testSingleChoiceGrouped()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => [
'Group&1' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'Group&2' => ['Choice&C' => '&c'],
],
'multiple' => false,
'expanded' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[./optgroup[@label="[trans]Group&1[/trans]"]
[
./option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=2]
]
[./optgroup[@label="[trans]Group&2[/trans]"]
[./option[@value="&c"][not(@selected)][.="[trans]Choice&C[/trans]"]]
[count(./option)=1]
]
[count(./optgroup)=2]
'
);
}
public function testMultipleChoice()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', ['&a'], [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'required' => true,
'multiple' => true,
'expanded' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name[]"]
[@required="required"]
[@multiple="multiple"]
[
./option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=2]
'
);
}
public function testMultipleChoiceAttributes()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', ['&a'], [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'choice_attr' => ['Choice&B' => ['class' => 'foo&bar']],
'required' => true,
'multiple' => true,
'expanded' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name[]"]
[@required="required"]
[@multiple="multiple"]
[
./option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][@class="foo&bar"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=2]
'
);
}
public function testMultipleChoiceSkipsPlaceholder()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', ['&a'], [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'multiple' => true,
'expanded' => false,
'placeholder' => 'Test&Me',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name[]"]
[@multiple="multiple"]
[
./option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=2]
'
);
}
public function testMultipleChoiceNonRequired()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', ['&a'], [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'required' => false,
'multiple' => true,
'expanded' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name[]"]
[@multiple="multiple"]
[
./option[@value="&a"][@selected="selected"][.="[trans]Choice&A[/trans]"]
/following-sibling::option[@value="&b"][not(@selected)][.="[trans]Choice&B[/trans]"]
]
[count(./option)=2]
'
);
}
public function testSingleChoiceExpanded()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'multiple' => false,
'expanded' => true,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input[@type="radio"][@name="name"][@id="name_0"][@value="&a"][@checked]
/following-sibling::label[@for="name_0"][.="[trans]Choice&A[/trans]"]
/following-sibling::input[@type="radio"][@name="name"][@id="name_1"][@value="&b"][not(@checked)]
/following-sibling::label[@for="name_1"][.="[trans]Choice&B[/trans]"]
/following-sibling::input[@type="hidden"][@id="name__token"]
]
[count(./input)=3]
'
);
}
public function testSingleChoiceExpandedWithoutTranslation()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'multiple' => false,
'expanded' => true,
'choice_translation_domain' => false,
'placeholder' => 'Placeholder&Not&Translated',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input[@type="radio"][@name="name"][@id="name_0"][@value="&a"][@checked]
/following-sibling::label[@for="name_0"][.="Choice&A"]
/following-sibling::input[@type="radio"][@name="name"][@id="name_1"][@value="&b"][not(@checked)]
/following-sibling::label[@for="name_1"][.="Choice&B"]
/following-sibling::input[@type="hidden"][@id="name__token"]
]
[count(./input)=3]
'
);
}
public function testSingleChoiceExpandedAttributes()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'choice_attr' => ['Choice&B' => ['class' => 'foo&bar']],
'multiple' => false,
'expanded' => true,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input[@type="radio"][@name="name"][@id="name_0"][@value="&a"][@checked]
/following-sibling::label[@for="name_0"][.="[trans]Choice&A[/trans]"]
/following-sibling::input[@type="radio"][@name="name"][@id="name_1"][@value="&b"][@class="foo&bar"][not(@checked)]
/following-sibling::label[@for="name_1"][.="[trans]Choice&B[/trans]"]
/following-sibling::input[@type="hidden"][@id="name__token"]
]
[count(./input)=3]
'
);
}
public function testSingleChoiceExpandedWithPlaceholder()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'multiple' => false,
'expanded' => true,
'placeholder' => 'Test&Me',
'required' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input[@type="radio"][@name="name"][@id="name_placeholder"][not(@checked)]
/following-sibling::label[@for="name_placeholder"][.="[trans]Test&Me[/trans]"]
/following-sibling::input[@type="radio"][@name="name"][@id="name_0"][@checked]
/following-sibling::label[@for="name_0"][.="[trans]Choice&A[/trans]"]
/following-sibling::input[@type="radio"][@name="name"][@id="name_1"][not(@checked)]
/following-sibling::label[@for="name_1"][.="[trans]Choice&B[/trans]"]
/following-sibling::input[@type="hidden"][@id="name__token"]
]
[count(./input)=4]
'
);
}
public function testSingleChoiceExpandedWithPlaceholderWithoutTranslation()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', '&a', [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b'],
'multiple' => false,
'expanded' => true,
'required' => false,
'choice_translation_domain' => false,
'placeholder' => 'Placeholder&Not&Translated',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input[@type="radio"][@name="name"][@id="name_placeholder"][not(@checked)]
/following-sibling::label[@for="name_placeholder"][.="Placeholder&Not&Translated"]
/following-sibling::input[@type="radio"][@name="name"][@id="name_0"][@checked]
/following-sibling::label[@for="name_0"][.="Choice&A"]
/following-sibling::input[@type="radio"][@name="name"][@id="name_1"][not(@checked)]
/following-sibling::label[@for="name_1"][.="Choice&B"]
/following-sibling::input[@type="hidden"][@id="name__token"]
]
[count(./input)=4]
'
);
}
public function testSingleChoiceExpandedWithBooleanValue()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', true, [
'choices' => ['Choice&A' => '1', 'Choice&B' => '0'],
'multiple' => false,
'expanded' => true,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input[@type="radio"][@name="name"][@id="name_0"][@checked]
/following-sibling::label[@for="name_0"][.="[trans]Choice&A[/trans]"]
/following-sibling::input[@type="radio"][@name="name"][@id="name_1"][not(@checked)]
/following-sibling::label[@for="name_1"][.="[trans]Choice&B[/trans]"]
/following-sibling::input[@type="hidden"][@id="name__token"]
]
[count(./input)=3]
'
);
}
public function testMultipleChoiceExpanded()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', ['&a', '&c'], [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b', 'Choice&C' => '&c'],
'multiple' => true,
'expanded' => true,
'required' => true,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input[@type="checkbox"][@name="name[]"][@id="name_0"][@checked][not(@required)]
/following-sibling::label[@for="name_0"][.="[trans]Choice&A[/trans]"]
/following-sibling::input[@type="checkbox"][@name="name[]"][@id="name_1"][not(@checked)][not(@required)]
/following-sibling::label[@for="name_1"][.="[trans]Choice&B[/trans]"]
/following-sibling::input[@type="checkbox"][@name="name[]"][@id="name_2"][@checked][not(@required)]
/following-sibling::label[@for="name_2"][.="[trans]Choice&C[/trans]"]
/following-sibling::input[@type="hidden"][@id="name__token"]
]
[count(./input)=4]
'
);
}
public function testMultipleChoiceExpandedWithoutTranslation()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', ['&a', '&c'], [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b', 'Choice&C' => '&c'],
'multiple' => true,
'expanded' => true,
'required' => true,
'choice_translation_domain' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input[@type="checkbox"][@name="name[]"][@id="name_0"][@checked][not(@required)]
/following-sibling::label[@for="name_0"][.="Choice&A"]
/following-sibling::input[@type="checkbox"][@name="name[]"][@id="name_1"][not(@checked)][not(@required)]
/following-sibling::label[@for="name_1"][.="Choice&B"]
/following-sibling::input[@type="checkbox"][@name="name[]"][@id="name_2"][@checked][not(@required)]
/following-sibling::label[@for="name_2"][.="Choice&C"]
/following-sibling::input[@type="hidden"][@id="name__token"]
]
[count(./input)=4]
'
);
}
public function testMultipleChoiceExpandedAttributes()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ChoiceType', ['&a', '&c'], [
'choices' => ['Choice&A' => '&a', 'Choice&B' => '&b', 'Choice&C' => '&c'],
'choice_attr' => ['Choice&B' => ['class' => 'foo&bar']],
'multiple' => true,
'expanded' => true,
'required' => true,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input[@type="checkbox"][@name="name[]"][@id="name_0"][@checked][not(@required)]
/following-sibling::label[@for="name_0"][.="[trans]Choice&A[/trans]"]
/following-sibling::input[@type="checkbox"][@name="name[]"][@id="name_1"][@class="foo&bar"][not(@checked)][not(@required)]
/following-sibling::label[@for="name_1"][.="[trans]Choice&B[/trans]"]
/following-sibling::input[@type="checkbox"][@name="name[]"][@id="name_2"][@checked][not(@required)]
/following-sibling::label[@for="name_2"][.="[trans]Choice&C[/trans]"]
/following-sibling::input[@type="hidden"][@id="name__token"]
]
[count(./input)=4]
'
);
}
public function testCountry()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\CountryType', 'AT');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[./option[@value="AT"][@selected="selected"][.="Austria"]]
[count(./option)>200]
'
);
}
public function testCountryWithPlaceholder()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\CountryType', 'AT', [
'placeholder' => 'Select&Country',
'required' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Select&Country[/trans]"]]
[./option[@value="AT"][@selected="selected"][.="Austria"]]
[count(./option)>201]
'
);
}
public function testDateTime()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateTimeType', date('Y').'-02-03 04:05:06', [
'input' => 'string',
'with_seconds' => false,
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./div
[@id="name_date"]
[
./select
[@id="name_date_month"]
[./option[@value="2"][@selected="selected"]]
/following-sibling::select
[@id="name_date_day"]
[./option[@value="3"][@selected="selected"]]
/following-sibling::select
[@id="name_date_year"]
[./option[@value="'.date('Y').'"][@selected="selected"]]
]
/following-sibling::div
[@id="name_time"]
[
./select
[@id="name_time_hour"]
[./option[@value="4"][@selected="selected"]]
/following-sibling::select
[@id="name_time_minute"]
[./option[@value="5"][@selected="selected"]]
]
]
[count(.//select)=5]
'
);
}
public function testDateTimeWithPlaceholderGlobal()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateTimeType', null, [
'input' => 'string',
'placeholder' => 'Change&Me',
'required' => false,
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./div
[@id="name_date"]
[
./select
[@id="name_date_month"]
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Change&Me[/trans]"]]
/following-sibling::select
[@id="name_date_day"]
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Change&Me[/trans]"]]
/following-sibling::select
[@id="name_date_year"]
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Change&Me[/trans]"]]
]
/following-sibling::div
[@id="name_time"]
[
./select
[@id="name_time_hour"]
[./option[@value=""][.="[trans]Change&Me[/trans]"]]
/following-sibling::select
[@id="name_time_minute"]
[./option[@value=""][.="[trans]Change&Me[/trans]"]]
]
]
[count(.//select)=5]
'
);
}
public function testDateTimeWithHourAndMinute()
{
$data = ['year' => date('Y'), 'month' => '2', 'day' => '3', 'hour' => '4', 'minute' => '5'];
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateTimeType', $data, [
'input' => 'array',
'required' => false,
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./div
[@id="name_date"]
[
./select
[@id="name_date_month"]
[./option[@value="2"][@selected="selected"]]
/following-sibling::select
[@id="name_date_day"]
[./option[@value="3"][@selected="selected"]]
/following-sibling::select
[@id="name_date_year"]
[./option[@value="'.date('Y').'"][@selected="selected"]]
]
/following-sibling::div
[@id="name_time"]
[
./select
[@id="name_time_hour"]
[./option[@value="4"][@selected="selected"]]
/following-sibling::select
[@id="name_time_minute"]
[./option[@value="5"][@selected="selected"]]
]
]
[count(.//select)=5]
'
);
}
public function testDateTimeWithSeconds()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateTimeType', date('Y').'-02-03 04:05:06', [
'input' => 'string',
'with_seconds' => true,
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./div
[@id="name_date"]
[
./select
[@id="name_date_month"]
[./option[@value="2"][@selected="selected"]]
/following-sibling::select
[@id="name_date_day"]
[./option[@value="3"][@selected="selected"]]
/following-sibling::select
[@id="name_date_year"]
[./option[@value="'.date('Y').'"][@selected="selected"]]
]
/following-sibling::div
[@id="name_time"]
[
./select
[@id="name_time_hour"]
[./option[@value="4"][@selected="selected"]]
/following-sibling::select
[@id="name_time_minute"]
[./option[@value="5"][@selected="selected"]]
/following-sibling::select
[@id="name_time_second"]
[./option[@value="6"][@selected="selected"]]
]
]
[count(.//select)=6]
'
);
}
public function testDateTimeSingleText()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateTimeType', '2011-02-03 04:05:06', [
'input' => 'string',
'date_widget' => 'single_text',
'time_widget' => 'single_text',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input
[@type="date"]
[@id="name_date"]
[@name="name[date]"]
[@value="2011-02-03"]
/following-sibling::input
[@type="time"]
[@id="name_time"]
[@name="name[time]"]
[@value="04:05"]
]
'
);
}
public function testDateTimeWithWidgetSingleText()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateTimeType', '2011-02-03 04:05:06', [
'input' => 'string',
'widget' => 'single_text',
'model_timezone' => 'UTC',
'view_timezone' => 'UTC',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="datetime-local"]
[@name="name"]
[@value="2011-02-03T04:05"]
'
);
}
public function testDateChoice()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateType', date('Y').'-02-03', [
'input' => 'string',
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./select
[@id="name_month"]
[./option[@value="2"][@selected="selected"]]
/following-sibling::select
[@id="name_day"]
[./option[@value="3"][@selected="selected"]]
/following-sibling::select
[@id="name_year"]
[./option[@value="'.date('Y').'"][@selected="selected"]]
]
[count(./select)=3]
'
);
}
public function testDateChoiceWithPlaceholderGlobal()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateType', null, [
'input' => 'string',
'widget' => 'choice',
'placeholder' => 'Change&Me',
'required' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./select
[@id="name_month"]
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Change&Me[/trans]"]]
/following-sibling::select
[@id="name_day"]
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Change&Me[/trans]"]]
/following-sibling::select
[@id="name_year"]
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Change&Me[/trans]"]]
]
[count(./select)=3]
'
);
}
public function testDateChoiceWithPlaceholderOnYear()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateType', null, [
'input' => 'string',
'widget' => 'choice',
'required' => false,
'placeholder' => ['year' => 'Change&Me'],
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./select
[@id="name_month"]
[./option[@value="1"]]
/following-sibling::select
[@id="name_day"]
[./option[@value="1"]]
/following-sibling::select
[@id="name_year"]
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Change&Me[/trans]"]]
]
[count(./select)=3]
'
);
}
public function testDateText()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateType', '2011-02-03', [
'input' => 'string',
'widget' => 'text',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input
[@id="name_month"]
[@type="text"]
[@value="2"]
/following-sibling::input
[@id="name_day"]
[@type="text"]
[@value="3"]
/following-sibling::input
[@id="name_year"]
[@type="text"]
[@value="2011"]
]
[count(./input)=3]
'
);
}
public function testDateSingleText()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\DateType', '2011-02-03', [
'input' => 'string',
'widget' => 'single_text',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="date"]
[@name="name"]
[@value="2011-02-03"]
'
);
}
public function testDateErrorBubbling()
{
$form = $this->factory->createNamedBuilder('form', 'Symfony\Component\Form\Extension\Core\Type\FormType')
->add('date', 'Symfony\Component\Form\Extension\Core\Type\DateType', ['widget' => 'choice'])
->getForm();
$form->get('date')->addError(new FormError('[trans]Error![/trans]'));
$view = $form->createView();
$this->assertSame('', $this->renderErrors($view));
$this->assertNotEmpty($this->renderErrors($view['date']));
}
public function testBirthDay()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\BirthdayType', '2000-02-03', [
'input' => 'string',
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./select
[@id="name_month"]
[./option[@value="2"][@selected="selected"]]
/following-sibling::select
[@id="name_day"]
[./option[@value="3"][@selected="selected"]]
/following-sibling::select
[@id="name_year"]
[./option[@value="2000"][@selected="selected"]]
]
[count(./select)=3]
'
);
}
public function testBirthDayWithPlaceholder()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\BirthdayType', '1950-01-01', [
'input' => 'string',
'placeholder' => '',
'required' => false,
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./select
[@id="name_month"]
[./option[@value=""][not(@selected)][not(@disabled)][.=""]]
[./option[@value="1"][@selected="selected"]]
/following-sibling::select
[@id="name_day"]
[./option[@value=""][not(@selected)][not(@disabled)][.=""]]
[./option[@value="1"][@selected="selected"]]
/following-sibling::select
[@id="name_year"]
[./option[@value=""][not(@selected)][not(@disabled)][.=""]]
[./option[@value="1950"][@selected="selected"]]
]
[count(./select)=3]
'
);
}
public function testEmail()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\EmailType', 'foo&bar');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="email"]
[@name="name"]
[@value="foo&bar"]
[not(@maxlength)]
'
);
}
public function testEmailWithMaxLength()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\EmailType', 'foo&bar', [
'attr' => ['maxlength' => 123],
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="email"]
[@name="name"]
[@value="foo&bar"]
[@maxlength="123"]
'
);
}
public function testFile()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\FileType');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="file"]
'
);
}
public function testHidden()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\HiddenType', 'foo&bar');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="hidden"]
[@name="name"]
[@value="foo&bar"]
'
);
}
public function testDisabled()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'disabled' => true,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="text"]
[@name="name"]
[@disabled="disabled"]
'
);
}
public function testInteger()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\IntegerType', 123);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="number"]
[@name="name"]
[@value="123"]
'
);
}
public function testIntegerTypeWithGroupingRendersAsTextInput()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\IntegerType', 123, [
'grouping' => true,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="text"]
[@name="name"]
[@value="123"]
'
);
}
public function testLanguage()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\LanguageType', 'de');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[./option[@value="de"][@selected="selected"][.="German"]]
[count(./option)>200]
'
);
}
public function testLocale()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\LocaleType', 'de_AT');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[./option[@value="de_AT"][@selected="selected"][.="German (Austria)"]]
[count(./option)>200]
'
);
}
public function testMoney()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\MoneyType', 1234.56, [
'currency' => 'EUR',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="text"]
[@name="name"]
[@value="1234.56"]
[contains(.., "€")]
'
);
}
public function testNumber()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\NumberType', 1234.56);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="text"]
[@name="name"]
[@value="1234.56"]
'
);
}
public function testRenderNumberWithHtml5NumberType()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\NumberType', 1234.56, [
'html5' => true,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="number"]
[@step="any"]
[@name="name"]
[@value="1234.56"]
'
);
}
public function testRenderNumberWithHtml5NumberTypeAndStepAttribute()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\NumberType', 1234.56, [
'html5' => true,
'attr' => ['step' => '0.1'],
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="number"]
[@step="0.1"]
[@name="name"]
[@value="1234.56"]
'
);
}
public function testPassword()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\PasswordType', 'foo&bar');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="password"]
[@name="name"]
'
);
}
public function testPasswordSubmittedWithNotAlwaysEmpty()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\PasswordType', null, [
'always_empty' => false,
]);
$form->submit('foo&bar');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="password"]
[@name="name"]
[@value="foo&bar"]
'
);
}
public function testPasswordWithMaxLength()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\PasswordType', 'foo&bar', [
'attr' => ['maxlength' => 123],
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="password"]
[@name="name"]
[@maxlength="123"]
'
);
}
public function testPercent()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\PercentType', 0.1, ['rounding_mode' => \NumberFormatter::ROUND_CEILING]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="text"]
[@name="name"]
[@value="10"]
[contains(.., "%")]
'
);
}
public function testPercentNoSymbol()
{
$form = $this->factory->createNamed('name', PercentType::class, 0.1, ['symbol' => false, 'rounding_mode' => \NumberFormatter::ROUND_CEILING]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="text"]
[@name="name"]
[@value="10"]
[not(contains(.., "%"))]
'
);
}
public function testPercentCustomSymbol()
{
$form = $this->factory->createNamed('name', PercentType::class, 0.1, ['symbol' => '‱', 'rounding_mode' => \NumberFormatter::ROUND_CEILING]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="text"]
[@name="name"]
[@value="10"]
[contains(.., "‱")]
'
);
}
public function testCheckedRadio()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\RadioType', true);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="radio"]
[@name="name"]
[@checked="checked"]
[@value="1"]
'
);
}
public function testUncheckedRadio()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\RadioType', false);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="radio"]
[@name="name"]
[not(@checked)]
'
);
}
public function testRadioWithValue()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\RadioType', false, [
'value' => 'foo&bar',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="radio"]
[@name="name"]
[@value="foo&bar"]
'
);
}
public function testRange()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\RangeType', 42, ['attr' => ['min' => 5]]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="range"]
[@name="name"]
[@value="42"]
[@min="5"]
'
);
}
public function testRangeWithMinMaxValues()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\RangeType', 42, ['attr' => ['min' => 5, 'max' => 57]]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="range"]
[@name="name"]
[@value="42"]
[@min="5"]
[@max="57"]
'
);
}
public function testTextarea()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextareaType', 'foo&bar', [
'attr' => ['pattern' => 'foo'],
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/textarea
[@name="name"]
[not(@pattern)]
[.="foo&bar"]
'
);
}
public function testText()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', 'foo&bar');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="text"]
[@name="name"]
[@value="foo&bar"]
[not(@maxlength)]
'
);
}
public function testTextWithMaxLength()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', 'foo&bar', [
'attr' => ['maxlength' => 123],
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="text"]
[@name="name"]
[@value="foo&bar"]
[@maxlength="123"]
'
);
}
public function testSearch()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\SearchType', 'foo&bar');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="search"]
[@name="name"]
[@value="foo&bar"]
[not(@maxlength)]
'
);
}
public function testTime()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TimeType', '04:05:06', [
'input' => 'string',
'with_seconds' => false,
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./select
[@id="name_hour"]
[not(@size)]
[./option[@value="4"][@selected="selected"]]
/following-sibling::select
[@id="name_minute"]
[not(@size)]
[./option[@value="5"][@selected="selected"]]
]
[count(./select)=2]
'
);
}
public function testTimeWithSeconds()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TimeType', '04:05:06', [
'input' => 'string',
'with_seconds' => true,
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./select
[@id="name_hour"]
[not(@size)]
[./option[@value="4"][@selected="selected"]]
[count(./option)>23]
/following-sibling::select
[@id="name_minute"]
[not(@size)]
[./option[@value="5"][@selected="selected"]]
[count(./option)>59]
/following-sibling::select
[@id="name_second"]
[not(@size)]
[./option[@value="6"][@selected="selected"]]
[count(./option)>59]
]
[count(./select)=3]
'
);
}
public function testTimeText()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TimeType', '04:05:06', [
'input' => 'string',
'widget' => 'text',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./input
[@type="text"]
[@id="name_hour"]
[@name="name[hour]"]
[@value="04"]
[@size="1"]
[@required="required"]
/following-sibling::input
[@type="text"]
[@id="name_minute"]
[@name="name[minute]"]
[@value="05"]
[@size="1"]
[@required="required"]
]
[count(./input)=2]
'
);
}
public function testTimeSingleText()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TimeType', '04:05:06', [
'input' => 'string',
'widget' => 'single_text',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="time"]
[@name="name"]
[@value="04:05"]
[not(@size)]
'
);
}
public function testTimeWithPlaceholderGlobal()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TimeType', null, [
'input' => 'string',
'placeholder' => 'Change&Me',
'required' => false,
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./select
[@id="name_hour"]
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Change&Me[/trans]"]]
[count(./option)>24]
/following-sibling::select
[@id="name_minute"]
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Change&Me[/trans]"]]
[count(./option)>60]
]
[count(./select)=2]
'
);
}
    public function testTimeWithPlaceholderOnHour()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TimeType', null, [
'input' => 'string',
'required' => false,
'placeholder' => ['hour' => 'Change&Me'],
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/div
[
./select
[@id="name_hour"]
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Change&Me[/trans]"]]
[count(./option)>24]
/following-sibling::select
[@id="name_minute"]
[./option[@value="1"]]
[count(./option)>59]
]
[count(./select)=2]
'
);
}
public function testTimeErrorBubbling()
{
$form = $this->factory->createNamedBuilder('form', 'Symfony\Component\Form\Extension\Core\Type\FormType')
->add('time', 'Symfony\Component\Form\Extension\Core\Type\TimeType', ['widget' => 'choice'])
->getForm();
$form->get('time')->addError(new FormError('[trans]Error![/trans]'));
$view = $form->createView();
$this->assertSame('', $this->renderErrors($view));
$this->assertNotEmpty($this->renderErrors($view['time']));
}
public function testTimezone()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TimezoneType', 'Europe/Vienna');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[@name="name"]
[not(@required)]
[./option[@value="Europe/Vienna"][@selected="selected"][.="Europe / Vienna"]]
[count(./option)>200]
'
);
}
public function testTimezoneWithPlaceholder()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TimezoneType', null, [
'placeholder' => 'Select&Timezone',
'required' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/select
[./option[@value=""][not(@selected)][not(@disabled)][.="[trans]Select&Timezone[/trans]"]]
[count(./option)>201]
'
);
}
public function testUrlWithDefaultProtocol()
{
$url = 'http://www.example.com?foo1=bar1&foo2=bar2';
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\UrlType', $url, ['default_protocol' => 'http']);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="text"]
[@name="name"]
[@value="http://www.example.com?foo1=bar1&foo2=bar2"]
[@inputmode="url"]
'
);
}
public function testUrlWithoutDefaultProtocol()
{
$url = 'http://www.example.com?foo1=bar1&foo2=bar2';
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\UrlType', $url, ['default_protocol' => null]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="url"]
[@name="name"]
[@value="http://www.example.com?foo1=bar1&foo2=bar2"]
'
);
}
public function testCollectionPrototype()
{
$form = $this->factory->createNamedBuilder('name', 'Symfony\Component\Form\Extension\Core\Type\FormType', ['items' => ['one', 'two', 'three']])
->add('items', 'Symfony\Component\Form\Extension\Core\Type\CollectionType', ['allow_add' => true])
->getForm()
->createView();
$html = $this->renderWidget($form);
$this->assertMatchesXpath($html,
'//div[@id="name_items"][@data-prototype]
|
//table[@id="name_items"][@data-prototype]'
);
}
public function testEmptyRootFormName()
{
$form = $this->factory->createNamedBuilder('', 'Symfony\Component\Form\Extension\Core\Type\FormType')
->add('child', 'Symfony\Component\Form\Extension\Core\Type\TextType')
->getForm();
$this->assertMatchesXpath($this->renderWidget($form->createView()),
'//input[@type="hidden"][@id="_token"][@name="_token"]
|
//input[@type="text"][@id="child"][@name="child"]', 2);
}
public function testButton()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ButtonType');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/button[@type="button"][@name="name"][.="[trans]Name[/trans]"]'
);
}
public function testButtonLabelIsEmpty()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ButtonType');
$this->assertSame('', $this->renderLabel($form->createView()));
}
    public function testButtonLabelWithoutTranslation()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ButtonType', null, [
'translation_domain' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/button[@type="button"][@name="name"][.="Name"]'
);
}
public function testSubmit()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\SubmitType');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/button[@type="submit"][@name="name"]'
);
}
public function testReset()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ResetType');
$this->assertWidgetMatchesXpath($form->createView(), [],
'/button[@type="reset"][@name="name"]'
);
}
public function testStartTag()
{
$form = $this->factory->create('Symfony\Component\Form\Extension\Core\Type\FormType', null, [
'method' => 'get',
'action' => 'http://example.com/directory',
]);
$html = $this->renderStart($form->createView());
$this->assertSame('<form name="form" method="get" action="http://example.com/directory">', $html);
}
public function testStartTagForPutRequest()
{
$form = $this->factory->create('Symfony\Component\Form\Extension\Core\Type\FormType', null, [
'method' => 'put',
'action' => 'http://example.com/directory',
]);
$html = $this->renderStart($form->createView());
$this->assertMatchesXpath($html.'</form>',
'/form
[./input[@type="hidden"][@name="_method"][@value="PUT"]]
[@method="post"]
[@action="http://example.com/directory"]'
);
}
public function testStartTagWithOverriddenVars()
{
$form = $this->factory->create('Symfony\Component\Form\Extension\Core\Type\FormType', null, [
'method' => 'put',
'action' => 'http://example.com/directory',
]);
$html = $this->renderStart($form->createView(), [
'method' => 'post',
'action' => 'http://foo.com/directory',
]);
$this->assertSame('<form name="form" method="post" action="http://foo.com/directory">', $html);
}
public function testStartTagForMultipartForm()
{
$form = $this->factory->createBuilder('Symfony\Component\Form\Extension\Core\Type\FormType', null, [
'method' => 'get',
'action' => 'http://example.com/directory',
])
->add('file', 'Symfony\Component\Form\Extension\Core\Type\FileType')
->getForm();
$html = $this->renderStart($form->createView());
$this->assertSame('<form name="form" method="get" action="http://example.com/directory" enctype="multipart/form-data">', $html);
}
public function testStartTagWithExtraAttributes()
{
$form = $this->factory->create('Symfony\Component\Form\Extension\Core\Type\FormType', null, [
'method' => 'get',
'action' => 'http://example.com/directory',
]);
$html = $this->renderStart($form->createView(), [
'attr' => ['class' => 'foobar'],
]);
$this->assertSame('<form name="form" method="get" action="http://example.com/directory" class="foobar">', $html);
}
public function testWidgetAttributes()
{
$form = $this->factory->createNamed('text', 'Symfony\Component\Form\Extension\Core\Type\TextType', 'value', [
'required' => true,
'disabled' => true,
'attr' => ['readonly' => true, 'maxlength' => 10, 'pattern' => '\d+', 'class' => 'foobar', 'data-foo' => 'bar'],
]);
$html = $this->renderWidget($form->createView());
// compare plain HTML to check the whitespace
$this->assertSame('<input type="text" id="text" name="text" disabled="disabled" required="required" readonly="readonly" maxlength="10" pattern="\d+" class="foobar" data-foo="bar" value="value" />', $html);
}
public function testWidgetAttributeNameRepeatedIfTrue()
{
$form = $this->factory->createNamed('text', 'Symfony\Component\Form\Extension\Core\Type\TextType', 'value', [
'attr' => ['foo' => true],
]);
$html = $this->renderWidget($form->createView());
// foo="foo"
$this->assertSame('<input type="text" id="text" name="text" required="required" foo="foo" value="value" />', $html);
}
public function testWidgetAttributeHiddenIfFalse()
{
$form = $this->factory->createNamed('text', 'Symfony\Component\Form\Extension\Core\Type\TextType', 'value', [
'attr' => ['foo' => false],
]);
$html = $this->renderWidget($form->createView());
$this->assertStringNotContainsString('foo="', $html);
}
public function testButtonAttributes()
{
$form = $this->factory->createNamed('button', 'Symfony\Component\Form\Extension\Core\Type\ButtonType', null, [
'disabled' => true,
'attr' => ['class' => 'foobar', 'data-foo' => 'bar'],
]);
$html = $this->renderWidget($form->createView());
// compare plain HTML to check the whitespace
$this->assertSame('<button type="button" id="button" name="button" disabled="disabled" class="foobar" data-foo="bar">[trans]Button[/trans]</button>', $html);
}
public function testButtonAttributeNameRepeatedIfTrue()
{
$form = $this->factory->createNamed('button', 'Symfony\Component\Form\Extension\Core\Type\ButtonType', null, [
'attr' => ['foo' => true],
]);
$html = $this->renderWidget($form->createView());
// foo="foo"
$this->assertSame('<button type="button" id="button" name="button" foo="foo">[trans]Button[/trans]</button>', $html);
}
public function testButtonAttributeHiddenIfFalse()
{
$form = $this->factory->createNamed('button', 'Symfony\Component\Form\Extension\Core\Type\ButtonType', null, [
'attr' => ['foo' => false],
]);
$html = $this->renderWidget($form->createView());
$this->assertStringNotContainsString('foo="', $html);
}
public function testTextareaWithWhitespaceOnlyContentRetainsValue()
{
$form = $this->factory->createNamed('textarea', 'Symfony\Component\Form\Extension\Core\Type\TextareaType', ' ');
$html = $this->renderWidget($form->createView());
$this->assertStringContainsString('> </textarea>', $html);
}
public function testTextareaWithWhitespaceOnlyContentRetainsValueWhenRenderingForm()
{
$form = $this->factory->createBuilder('Symfony\Component\Form\Extension\Core\Type\FormType', ['textarea' => ' '])
->add('textarea', 'Symfony\Component\Form\Extension\Core\Type\TextareaType')
->getForm();
$html = $this->renderForm($form->createView());
$this->assertStringContainsString('> </textarea>', $html);
}
public function testWidgetContainerAttributeHiddenIfFalse()
{
$form = $this->factory->createNamed('form', 'Symfony\Component\Form\Extension\Core\Type\FormType', null, [
'attr' => ['foo' => false],
]);
$html = $this->renderWidget($form->createView());
// no foo
$this->assertStringNotContainsString('foo="', $html);
}
public function testTranslatedAttributes()
{
$view = $this->factory->createNamedBuilder('name', 'Symfony\Component\Form\Extension\Core\Type\FormType')
->add('firstName', 'Symfony\Component\Form\Extension\Core\Type\TextType', ['attr' => ['title' => 'Foo']])
->add('lastName', 'Symfony\Component\Form\Extension\Core\Type\TextType', ['attr' => ['placeholder' => 'Bar']])
->getForm()
->createView();
$html = $this->renderForm($view);
$this->assertMatchesXpath($html, '/form//input[@title="[trans]Foo[/trans]"]');
$this->assertMatchesXpath($html, '/form//input[@placeholder="[trans]Bar[/trans]"]');
}
public function testAttributesNotTranslatedWhenTranslationDomainIsFalse()
{
$view = $this->factory->createNamedBuilder('name', 'Symfony\Component\Form\Extension\Core\Type\FormType', null, [
'translation_domain' => false,
])
->add('firstName', 'Symfony\Component\Form\Extension\Core\Type\TextType', ['attr' => ['title' => 'Foo']])
->add('lastName', 'Symfony\Component\Form\Extension\Core\Type\TextType', ['attr' => ['placeholder' => 'Bar']])
->getForm()
->createView();
$html = $this->renderForm($view);
$this->assertMatchesXpath($html, '/form//input[@title="Foo"]');
$this->assertMatchesXpath($html, '/form//input[@placeholder="Bar"]');
}
public function testTel()
{
$tel = '0102030405';
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TelType', $tel);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="tel"]
[@name="name"]
[@value="0102030405"]
'
);
}
public function testColor()
{
$color = '#0000ff';
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\ColorType', $color);
$this->assertWidgetMatchesXpath($form->createView(), [],
'/input
[@type="color"]
[@name="name"]
[@value="#0000ff"]
'
);
}
public function testLabelWithTranslationParameters()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType');
$html = $this->renderLabel($form->createView(), 'Address is %address%', [
'label_translation_parameters' => [
'%address%' => 'Paris, rue de la Paix',
],
]);
$this->assertMatchesXpath($html,
'/label
[@for="name"]
[.="[trans]Address is Paris, rue de la Paix[/trans]"]
'
);
}
public function testHelpWithTranslationParameters()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'help' => 'for company %company%',
'help_translation_parameters' => [
'%company%' => 'ACME Ltd.',
],
]);
$html = $this->renderHelp($form->createView());
$this->assertMatchesXpath($html,
'/*
[@id="name_help"]
[.="[trans]for company ACME Ltd.[/trans]"]
'
);
}
public function testLabelWithTranslatableMessage()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'label' => new TranslatableMessage('foo'),
]);
$html = $this->renderLabel($form->createView());
$this->assertMatchesXpath($html,
'/label
[@for="name"]
[.="[trans]foo[/trans]"]
'
);
}
public function testHelpWithTranslatableMessage()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'help' => new TranslatableMessage('foo'),
]);
$html = $this->renderHelp($form->createView());
$this->assertMatchesXpath($html,
'/*
[@id="name_help"]
[.="[trans]foo[/trans]"]
'
);
}
public function testHelpWithTranslatableInterface()
{
$message = new class implements TranslatableInterface {
public function trans(TranslatorInterface $translator, ?string $locale = null): string
{
return $translator->trans('foo');
}
};
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'help' => $message,
]);
$html = $this->renderHelp($form->createView());
$this->assertMatchesXpath($html,
'/*
[@id="name_help"]
[.="[trans]foo[/trans]"]
'
);
}
public function testAttributesWithTranslationParameters()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\TextType', null, [
'attr' => [
'title' => 'Message to %company%',
'placeholder' => 'Enter a message to %company%',
],
'attr_translation_parameters' => [
'%company%' => 'ACME Ltd.',
],
]);
$html = $this->renderWidget($form->createView());
$this->assertMatchesXpath($html,
'/input
[@title="[trans]Message to ACME Ltd.[/trans]"]
[@placeholder="[trans]Enter a message to ACME Ltd.[/trans]"]
'
);
}
public function testButtonWithTranslationParameters()
{
$form = $this->factory->createNamedBuilder('myform')
->add('mybutton', 'Symfony\Component\Form\Extension\Core\Type\ButtonType', [
'label' => 'Submit to %company%',
'label_translation_parameters' => [
'%company%' => 'ACME Ltd.',
],
])
->getForm();
$view = $form->get('mybutton')->createView();
$html = $this->renderWidget($view, ['label_format' => 'form.%name%']);
$this->assertMatchesXpath($html,
'/button
[.="[trans]Submit to ACME Ltd.[/trans]"]
'
);
}
#[DataProvider('submitFormNoValidateProvider')]
public function testSubmitFormNoValidate(bool $validate)
{
$form = $this->factory->create(SubmitType::class, null, [
'validate' => $validate,
]);
$html = $this->renderWidget($form->createView());
$xpath = '/button
[@type="submit"]
';
if (!$validate) {
$xpath .= '[@formnovalidate="formnovalidate"]';
} else {
$xpath .= '[not(@formnovalidate="formnovalidate")]';
}
$this->assertMatchesXpath($html, $xpath);
}
public static function submitFormNoValidateProvider()
{
return [
[false],
[true],
];
}
public function testWeekSingleText()
{
$form = $this->factory->createNamed('holidays', 'Symfony\Component\Form\Extension\Core\Type\WeekType', '1970-W01', [
'input' => 'string',
'widget' => 'single_text',
]);
$this->assertWidgetMatchesXpath($form->createView(), ['attr' => ['class' => 'my&class']],
'/input
[@type="week"]
[@name="holidays"]
[@class="my&class"]
[@value="1970-W01"]
'
);
}
public function testWeekSingleTextNoHtml5()
{
$form = $this->factory->createNamed('holidays', 'Symfony\Component\Form\Extension\Core\Type\WeekType', '1970-W01', [
'input' => 'string',
'widget' => 'single_text',
'html5' => false,
]);
$this->assertWidgetMatchesXpath($form->createView(), ['attr' => ['class' => 'my&class']],
'/input
[@type="text"]
[@name="holidays"]
[@class="my&class"]
[@value="1970-W01"]
'
);
}
public function testWeekChoices()
{
$data = ['year' => (int) date('Y'), 'week' => 1];
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\WeekType', $data, [
'input' => 'array',
'widget' => 'choice',
]);
$this->assertWidgetMatchesXpath($form->createView(), ['attr' => ['class' => 'my&class']],
'/div
[@class="my&class"]
[
./select
[@id="name_year"]
[./option[@value="'.$data['year'].'"][@selected="selected"]]
/following-sibling::select
[@id="name_week"]
[./option[@value="'.$data['week'].'"][@selected="selected"]]
]
[count(.//select)=2]'
);
}
public function testWeekText()
{
$form = $this->factory->createNamed('name', 'Symfony\Component\Form\Extension\Core\Type\WeekType', '2000-W01', [
'input' => 'string',
'widget' => 'text',
]);
$this->assertWidgetMatchesXpath($form->createView(), ['attr' => ['class' => 'my&class']],
'/div
[@class="my&class"]
[
./input
[@id="name_year"]
[@type="number"]
[@value="2000"]
/following-sibling::input
[@id="name_week"]
[@type="number"]
[@value="1"]
]
[count(./input)=2]'
);
}
}
|
php
|
github
|
https://github.com/symfony/symfony
|
src/Symfony/Bridge/Twig/Tests/Extension/AbstractLayoutTestCase.php
|
# Natural Language Toolkit: Clusterers
#
# Copyright (C) 2004-2006 University of Melbourne
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Porting: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
This module contains a number of basic clustering algorithms. Clustering
describes the task of discovering groups of similar items within a large
collection. It is also described as unsupervised machine learning, as the data
from which it learns is not annotated with class information, unlike the data
required for supervised learning. Annotated data is difficult and expensive to
obtain in the quantities required for the majority of supervised learning
algorithms. This problem, the knowledge acquisition bottleneck, is common to
most natural language processing tasks, thus fueling the need for quality
unsupervised approaches.
This module contains a k-means clusterer, E-M clusterer and a group average
agglomerative clusterer (GAAC). All these clusterers involve finding good
cluster groupings for a set of vectors in multi-dimensional space.
The K-means clusterer starts with k arbitrarily chosen means, then allocates
each vector to the cluster with the closest mean. It then recalculates the mean
of each cluster as the centroid of the vectors in that cluster. This process
repeats until the cluster memberships stabilise. This is a hill-climbing
algorithm which may converge to a local maximum. Hence the clustering is
often repeated with random initial means and the most commonly occurring
output means are chosen. (An illustrative sketch of a single k-means pass
appears after the module imports below.)
The GAAC clusterer starts with each of the M{N} vectors as singleton clusters.
It then iteratively merges pairs of clusters which have the closest centroids.
This continues until there is only one cluster. The order of merges gives rise
to a dendrogram - a tree with the earlier merges lower than later merges. The
membership of a given number of clusters M{c}, M{1 <= c <= N}, can be found by
cutting the dendrogram at depth M{c}. (A sketch of the merge-selection step
appears after the ClusterI class below.)
The Gaussian EM clusterer models the vectors as being produced by a mixture
of k Gaussian sources. The parameters of these sources (prior probability,
mean and covariance matrix) are then found to maximise the likelihood of the
given data. This is done with the expectation maximisation algorithm. It
starts with k arbitrarily chosen means, priors and covariance matrices. It
then calculates the membership probabilities for each vector in each of the
clusters - this is the 'E' step. The cluster parameters are then updated in
the 'M' step using the maximum likelihood estimate from the cluster membership
probabilities. This process continues until the likelihood of the data does
not significantly increase.
They all extend the ClusterI interface which defines common operations
available with each clusterer. These operations include:
- cluster: clusters a sequence of vectors
- classify: assigns a vector to a cluster
- classification_probdist: gives the probability distribution over cluster memberships
The existing clusterers also extend cluster.VectorSpace, an
abstract class which allows for singular value decomposition (SVD) and vector
normalisation. SVD is used to reduce the dimensionality of the vector space in
such a manner as to preserve as much of the variation as possible, by
reparameterising the axes in order of variability and discarding all bar the
first d dimensions. Normalisation ensures that vectors fall in the unit
hypersphere.
Usage example (see also demo())::
vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]
# initialise the clusterer (will also assign the vectors to clusters)
clusterer = cluster.KMeans(2, euclidean_distance)
clusterer.cluster(vectors, True)
# classify a new vector
    print(clusterer.classify(array([3, 3])))
Note that the vectors must use numpy array-like
objects. nltk_contrib.unimelb.tacohn.SparseArrays may be used for
efficiency when required.
"""
from en.parser.nltk_lite.probability import DictionaryProbDist
import copy, numpy, math, random, sys, types
from numpy import array, linalg
#======================================================================
# Generic interfaces
#======================================================================
class ClusterI:
"""
Interface covering basic clustering functionality.
"""
def cluster(self, vectors, assign_clusters=False):
"""
Assigns the vectors to clusters, learning the clustering parameters
from the data. Returns a cluster identifier for each vector.
"""
raise AssertionError()
def classify(self, token):
"""
Classifies the token into a cluster, setting the token's CLUSTER
parameter to that cluster identifier.
"""
raise AssertionError()
def likelihood(self, vector, label):
"""
Returns the likelihood (a float) of the token having the
corresponding cluster.
"""
if self.classify(vector) == label:
return 1.0
else:
return 0.0
def classification_probdist(self, vector):
"""
Classifies the token into a cluster, returning
a probability distribution over the cluster identifiers.
"""
likelihoods = {}
sum = 0.0
for cluster in self.cluster_names():
likelihoods[cluster] = self.likelihood(vector, cluster)
sum += likelihoods[cluster]
for cluster in self.cluster_names():
likelihoods[cluster] /= sum
return DictionaryProbDist(likelihoods)
def num_clusters(self):
"""
Returns the number of clusters.
"""
raise AssertionError()
def cluster_names(self):
"""
Returns the names of the clusters.
"""
return range(self.num_clusters())
def cluster_name(self, index):
"""
Returns the name of the cluster at index.
"""
return index
class VectorSpace(ClusterI):
"""
Abstract clusterer which takes tokens and maps them into a vector space.
Optionally performs singular value decomposition to reduce the
dimensionality.
"""
def __init__(self, normalise=False, svd_dimensions=None):
"""
@param normalise: should vectors be normalised to length 1
@type normalise: boolean
@param svd_dimensions: number of dimensions to use in reducing vector
dimensionality with SVD
@type svd_dimensions: int
"""
self._Tt = None
self._should_normalise = normalise
self._svd_dimensions = svd_dimensions
def cluster(self, vectors, assign_clusters=False, trace=False):
assert len(vectors) > 0
# normalise the vectors
if self._should_normalise:
vectors = map(self._normalise, vectors)
# use SVD to reduce the dimensionality
if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
[u, d, vt] = linalg.svd(numpy.transpose(array(vectors)))
S = d[:self._svd_dimensions] * \
numpy.identity(self._svd_dimensions, numpy.float64)
T = u[:,:self._svd_dimensions]
Dt = vt[:self._svd_dimensions,:]
vectors = numpy.transpose(numpy.dot(S, Dt))
self._Tt = numpy.transpose(T)
# call abstract method to cluster the vectors
self.cluster_vectorspace(vectors, trace)
# assign the vectors to clusters
if assign_clusters:
return [self.classify(vector) for vector in vectors]
def cluster_vectorspace(self, vectors, trace):
"""
Finds the clusters using the given set of vectors.
"""
raise AssertionError()
def classify(self, vector):
if self._should_normalise:
vector = self._normalise(vector)
if self._Tt is not None:
vector = numpy.dot(self._Tt, vector)
cluster = self.classify_vectorspace(vector)
return self.cluster_name(cluster)
def classify_vectorspace(self, vector):
"""
Returns the index of the appropriate cluster for the vector.
"""
raise AssertionError()
def likelihood(self, vector, label):
if self._should_normalise:
vector = self._normalise(vector)
if self._Tt is not None:
vector = numpy.dot(self._Tt, vector)
return self.likelihood_vectorspace(vector, label)
def likelihood_vectorspace(self, vector, cluster):
"""
Returns the likelihood of the vector belonging to the cluster.
"""
predicted = self.classify_vectorspace(vector)
if cluster == predicted: return 1.0
else: return 0.0
def vector(self, vector):
"""
Returns the vector after normalisation and dimensionality reduction
"""
if self._should_normalise:
vector = self._normalise(vector)
if self._Tt is not None:
vector = numpy.dot(self._Tt, vector)
return vector
def _normalise(self, vector):
"""
Normalises the vector to unit length.
"""
return vector / math.sqrt(numpy.dot(vector, vector))
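# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of the truncated-SVD
# reduction performed by VectorSpace.cluster above: keep only the d axes of
# greatest variation and re-express each vector in that reduced basis. The
# helper name `_svd_reduce_sketch` is an assumption for illustration only.
def _svd_reduce_sketch(vectors, d):
    """Return (reduced_vectors, Tt), where Tt maps a new vector into the
    d-dimensional reduced space."""
    m = numpy.transpose(array(vectors))  # features x samples
    u, s, vt = linalg.svd(m)
    Tt = numpy.transpose(u[:, :d])  # top-d axes of variation
    reduced = [numpy.dot(Tt, v) for v in vectors]
    return reduced, Tt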
class _DendogramNode:
""" Tree node of a dendogram. """
def __init__(self, value, *children):
self._value = value
self._children = children
def leaves(self, values=True):
if self._children:
leaves = []
for child in self._children:
leaves.extend(child.leaves(values))
return leaves
elif values:
return [self._value]
else:
return [self]
def groups(self, n):
queue = [(self._value, self)]
while len(queue) < n:
priority, node = queue.pop()
if not node._children:
queue.append((priority, node))
break
for child in node._children:
if child._children:
queue.append((child._value, child))
else:
queue.append((0, child))
# makes the earliest merges at the start, latest at the end
queue.sort()
groups = []
for priority, node in queue:
groups.append(node.leaves())
return groups
class Dendogram:
"""
Represents a dendogram, a tree with a specified branching order. It must
be initialised with the leaf items; merge is then called iteratively for
each branch. This class constructs a tree representing the order of calls
to the merge function. (A usage sketch follows this class.)
"""
def __init__(self, items=[]):
"""
@param items: the items at the leaves of the dendogram
@type items: sequence of (any)
"""
self._items = [_DendogramNode(item) for item in items]
self._original_items = copy.copy(self._items)
self._merge = 1
def merge(self, *indices):
"""
Merges nodes at the given indices in the dendogram. The nodes are
combined into a new node, which then replaces the first node specified.
All other nodes involved in the merge are removed.
@param indices: indices of the items to merge (at least two)
@type indices: seq of int
"""
assert len(indices) >= 2
node = _DendogramNode(self._merge, *[self._items[i] for i in indices])
self._merge += 1
self._items[indices[0]] = node
for i in indices[1:]:
del self._items[i]
def groups(self, n):
"""
Finds the n-groups of items (leaves) reachable from a cut at depth n.
@param n: number of groups
@type n: int
"""
if len(self._items) > 1:
root = _DendogramNode(self._merge, *self._items)
else:
root = self._items[0]
return root.groups(n)
def show(self):
"""
Print the dendogram in ASCII art to standard out.
"""
# ASCII rendering characters
JOIN, HLINK, VLINK = '+', '-', '|'
# find the root (or create one)
if len(self._items) > 1:
root = _DendogramNode(self._merge, *self._items)
else:
root = self._items[0]
leaves = self._original_items
# find the bottom row and the best cell width
last_row = [str(leaf._value) for leaf in leaves]
width = max(map(len, last_row)) + 1
lhalf = width / 2
rhalf = width - lhalf - 1
# display functions
def format(centre, left=' ', right=' '):
return '%s%s%s' % (lhalf*left, centre, right*rhalf)
def display(str):
sys.stdout.write(str)
# for each merge, top down
queue = [(root._value, root)]
verticals = [ format(' ') for leaf in leaves ]
while queue:
priority, node = queue.pop()
child_left_leaf = map(lambda c: c.leaves(False)[0], node._children)
indices = map(leaves.index, child_left_leaf)
if child_left_leaf:
min_idx = min(indices)
max_idx = max(indices)
for i in range(len(leaves)):
if leaves[i] in child_left_leaf:
if i == min_idx: display(format(JOIN, ' ', HLINK))
elif i == max_idx: display(format(JOIN, HLINK, ' '))
else: display(format(JOIN, HLINK, HLINK))
verticals[i] = format(VLINK)
elif min_idx <= i <= max_idx:
display(format(HLINK, HLINK, HLINK))
else:
display(verticals[i])
display('\n')
for child in node._children:
if child._children:
queue.append((child._value, child))
queue.sort()
for vertical in verticals:
display(vertical)
display('\n')
# finally, display the last line
display(''.join([item.center(width) for item in last_row]))
display('\n')
def __repr__(self):
if len(self._items) > 1:
root = _DendogramNode(self._merge, *self._items)
else:
root = self._items[0]
leaves = root.leaves(False)
return '<Dendogram with %d leaves>' % len(leaves)
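# ---------------------------------------------------------------------------
# Usage sketch for Dendogram (illustrative only, not part of the original
# module): build a dendogram over four items, record two merges, then cut it
# into two groups as described in the module docstring.
def _dendogram_sketch():
    d = Dendogram(['a', 'b', 'c', 'd'])
    d.merge(0, 1)  # items now: [merge(a, b), c, d]
    d.merge(1, 2)  # items now: [merge(a, b), merge(c, d)]
    return d.groups(2)  # -> [['a', 'b'], ['c', 'd']]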
########################################################################
from kmeans import *
from gaac import *
from em import *
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
test_common
~~~~~~~~~~~
Test common functionality
"""
import base64
from utils import authenticate, json_authenticate, logout
try:
from cookielib import Cookie
except ImportError:
from http.cookiejar import Cookie
def test_login_view(client):
response = client.get('/login')
assert b'<h1>Login</h1>' in response.data
def test_authenticate(client):
response = authenticate(client)
assert response.status_code == 302
response = authenticate(client, follow_redirects=True)
assert b'Hello matt@lp.com' in response.data
def test_authenticate_with_next(client):
data = dict(email='matt@lp.com', password='password')
response = client.post(
'/login?next=/page1',
data=data,
follow_redirects=True)
assert b'Page 1' in response.data
def test_authenticate_with_invalid_next(client, get_message):
data = dict(email='matt@lp.com', password='password')
response = client.post('/login?next=http://google.com', data=data)
assert get_message('INVALID_REDIRECT') in response.data
def test_authenticate_with_invalid_malformed_next(client, get_message):
data = dict(email='matt@lp.com', password='password')
response = client.post('/login?next=http:///google.com', data=data)
assert get_message('INVALID_REDIRECT') in response.data
def test_authenticate_case_insensitive_email(app, client):
response = authenticate(client, 'MATT@lp.com', follow_redirects=True)
assert b'Hello matt@lp.com' in response.data
def test_authenticate_with_invalid_input(client, get_message):
response = client.post(
'/login',
data='{}',
headers={'Content-Type': 'application/json'},
)
assert get_message('EMAIL_NOT_PROVIDED') in response.data
def test_login_form(client):
response = client.post('/login', data={'email': 'matt@lp.com'})
assert b'matt@lp.com' in response.data
def test_unprovided_username(client, get_message):
response = authenticate(client, "")
assert get_message('EMAIL_NOT_PROVIDED') in response.data
def test_unprovided_password(client, get_message):
response = authenticate(client, password="")
assert get_message('PASSWORD_NOT_PROVIDED') in response.data
def test_invalid_user(client, get_message):
response = authenticate(client, email="bogus@bogus.com")
assert get_message('USER_DOES_NOT_EXIST') in response.data
def test_bad_password(client, get_message):
response = authenticate(client, password="bogus")
assert get_message('INVALID_PASSWORD') in response.data
def test_inactive_user(client, get_message):
response = authenticate(client, "tiya@lp.com", "password")
assert get_message('DISABLED_ACCOUNT') in response.data
def test_unset_password(client, get_message):
response = authenticate(client, "jess@lp.com", "password")
assert get_message('PASSWORD_NOT_SET') in response.data
def test_logout(client):
authenticate(client)
response = logout(client, follow_redirects=True)
assert b'Home Page' in response.data
def test_logout_with_next(client, get_message):
authenticate(client)
response = client.get('/logout?next=http://google.com')
assert 'google.com' not in response.location
def test_missing_session_access(client, get_message):
response = client.get('/profile', follow_redirects=True)
assert get_message('LOGIN') in response.data
def test_has_session_access(client):
authenticate(client)
response = client.get("/profile", follow_redirects=True)
assert b'profile' in response.data
def test_authorized_access(client):
authenticate(client)
response = client.get("/admin")
assert b'Admin Page' in response.data
def test_unauthorized_access(client, get_message):
authenticate(client, "joe@lp.com")
response = client.get("/admin", follow_redirects=True)
assert get_message('UNAUTHORIZED') in response.data
def test_roles_accepted(client):
for user in ("matt@lp.com", "joe@lp.com"):
authenticate(client, user)
response = client.get("/admin_or_editor")
assert b'Admin or Editor Page' in response.data
logout(client)
authenticate(client, "jill@lp.com")
response = client.get("/admin_or_editor", follow_redirects=True)
assert b'Home Page' in response.data
def test_unauthenticated_role_required(client, get_message):
response = client.get('/admin', follow_redirects=True)
assert get_message('UNAUTHORIZED') in response.data
def test_multiple_role_required(client):
for user in ("matt@lp.com", "joe@lp.com"):
authenticate(client, user)
response = client.get("/admin_and_editor", follow_redirects=True)
assert b'Home Page' in response.data
client.get('/logout')
authenticate(client, 'dave@lp.com')
response = client.get("/admin_and_editor", follow_redirects=True)
assert b'Admin and Editor Page' in response.data
def test_ok_json_auth(client):
response = json_authenticate(client)
assert response.jdata['meta']['code'] == 200
assert 'authentication_token' in response.jdata['response']['user']
def test_invalid_json_auth(client):
response = json_authenticate(client, password='junk')
assert b'"code": 400' in response.data
def test_token_auth_via_querystring_valid_token(client):
response = json_authenticate(client)
token = response.jdata['response']['user']['authentication_token']
response = client.get('/token?auth_token=' + token)
assert b'Token Authentication' in response.data
def test_token_auth_via_header_valid_token(client):
response = json_authenticate(client)
token = response.jdata['response']['user']['authentication_token']
headers = {"Authentication-Token": token}
response = client.get('/token', headers=headers)
assert b'Token Authentication' in response.data
def test_token_auth_via_querystring_invalid_token(client):
response = client.get('/token?auth_token=X')
assert 401 == response.status_code
def test_token_auth_via_header_invalid_token(client):
response = client.get('/token', headers={"Authentication-Token": 'X'})
assert 401 == response.status_code
def test_http_auth(client):
response = client.get('/http', headers={
'Authorization': 'Basic %s' % base64.b64encode(
b"joe@lp.com:password").decode('utf-8')
})
assert b'HTTP Authentication' in response.data
def test_http_auth_no_authorization(client):
response = client.get('/http', headers={})
assert b'<h1>Unauthorized</h1>' in response.data
assert 'WWW-Authenticate' in response.headers
assert 'Basic realm="Login Required"' == response.headers[
'WWW-Authenticate']
def test_invalid_http_auth_invalid_username(client):
response = client.get('/http', headers={
'Authorization': 'Basic %s' % base64.b64encode(
b"bogus:bogus").decode('utf-8')
})
assert b'<h1>Unauthorized</h1>' in response.data
assert 'WWW-Authenticate' in response.headers
assert 'Basic realm="Login Required"' == response.headers[
'WWW-Authenticate']
def test_invalid_http_auth_bad_password(client):
response = client.get('/http', headers={
'Authorization': 'Basic %s' % base64.b64encode(
b"joe@lp.com:bogus").decode('utf-8')
})
assert b'<h1>Unauthorized</h1>' in response.data
assert 'WWW-Authenticate' in response.headers
assert 'Basic realm="Login Required"' == response.headers[
'WWW-Authenticate']
def test_custom_http_auth_realm(client):
response = client.get('/http_custom_realm', headers={
'Authorization': 'Basic %s' % base64.b64encode(
b"joe@lp.com:bogus").decode('utf-8')
})
assert b'<h1>Unauthorized</h1>' in response.data
assert 'WWW-Authenticate' in response.headers
assert 'Basic realm="My Realm"' == response.headers['WWW-Authenticate']
def test_multi_auth_basic(client):
response = client.get('/multi_auth', headers={
'Authorization': 'Basic %s' % base64.b64encode(
b"joe@lp.com:password").decode('utf-8')
})
assert b'Basic' in response.data
response = client.get('/multi_auth')
assert response.status_code == 401
def test_multi_auth_basic_invalid(client):
response = client.get('/multi_auth', headers={
'Authorization': 'Basic %s' % base64.b64encode(
b"bogus:bogus").decode('utf-8')
})
assert b'<h1>Unauthorized</h1>' in response.data
assert 'WWW-Authenticate' in response.headers
assert 'Basic realm="Login Required"' == response.headers[
'WWW-Authenticate']
response = client.get('/multi_auth')
print(response.headers)
assert response.status_code == 401
def test_multi_auth_token(client):
response = json_authenticate(client)
token = response.jdata['response']['user']['authentication_token']
response = client.get('/multi_auth?auth_token=' + token)
assert b'Token' in response.data
def test_multi_auth_session(client):
authenticate(client)
response = client.get('/multi_auth')
assert b'Session' in response.data
def test_user_deleted_during_session_reverts_to_anonymous_user(app, client):
authenticate(client)
with app.test_request_context('/'):
user = app.security.datastore.find_user(email='matt@lp.com')
app.security.datastore.delete_user(user)
app.security.datastore.commit()
response = client.get('/')
assert b'Hello matt@lp.com' not in response.data
def test_remember_token(client):
response = authenticate(client, follow_redirects=False)
client.cookie_jar.clear_session_cookies()
response = client.get('/profile')
assert b'profile' in response.data
def test_request_loader_does_not_fail_with_invalid_token(client):
c = Cookie(version=0, name='remember_token', value='None', port=None,
port_specified=False, domain='www.example.com',
domain_specified=False, domain_initial_dot=False, path='/',
path_specified=True, secure=False, expires=None,
discard=True, comment=None, comment_url=None,
rest={'HttpOnly': None}, rfc2109=False)
client.cookie_jar.set_cookie(c)
response = client.get('/')
assert b'BadSignature' not in response.data
def test_sending_auth_token_with_json(client):
response = json_authenticate(client)
token = response.jdata['response']['user']['authentication_token']
data = '{"auth_token": "%s"}' % token
response = client.post(
'/token',
data=data,
headers={
'Content-Type': 'application/json'})
assert b'Token Authentication' in response.data
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*!
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.dev/license
*/
import {Type} from '@angular/core';
import {SafeHtml} from '@angular/platform-browser';
/**
* Map of the examples; values are functions that return a promise of the component type, which will be displayed as a preview in the ExampleViewer component
*/
export interface CodeExamplesMap {
[id: string]: () => Promise<Type<unknown>>;
}
export interface Snippet {
/** Title of the code snippet */
title?: string;
/** Name of the file. */
name: string;
/** Content of code snippet */
sanitizedContent: SafeHtml;
/** Text in the format `start-end`, where start and end are line numbers; the given range of lines is displayed in collapsed mode */
visibleLinesRange?: string;
shell?: boolean;
}
export interface ExampleMetadata {
/** Numeric id of the example, used to generate a unique link to the example */
id: number;
/** Title of the example. */
title?: string;
/** Path to the preview component */
path?: string;
/** List of files which are part of the example. */
files: Snippet[];
/** True when ExampleViewer should have preview */
preview: boolean;
/** Whether to hide code example by default. */
hideCode: boolean;
/** Visual style for the code example */
style?: 'prefer' | 'avoid';
}
|
typescript
|
github
|
https://github.com/angular/angular
|
adev/shared-docs/interfaces/code-example.ts
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-21 09:54
from __future__ import unicode_literals
import django.contrib.postgres.fields.hstore
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('goals', '0046_add_field_topojson_to_areatype'),
]
operations = [
migrations.CreateModel(
name='Sector',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Sector name')),
('code', models.CharField(max_length=20, verbose_name='Sector code')),
('description', models.TextField(blank=True, verbose_name='Sector description')),
('image', models.ImageField(blank=True, null=True, upload_to='goals/sectors/images', verbose_name='Image')),
('slug', models.SlugField(blank=True, verbose_name='Slug')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last modified')),
('extras', django.contrib.postgres.fields.hstore.HStoreField(blank=True, default={}, null=True, verbose_name='Extras')),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='goals.Sector')),
],
options={
'verbose_name': 'Sector',
'verbose_name_plural': 'Sectors',
},
),
migrations.CreateModel(
name='SectorType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=20, unique=True, verbose_name='Code')),
('name', models.CharField(max_length=255, verbose_name='Name')),
('description', models.TextField(blank=True, verbose_name='Description')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last modified')),
('extras', django.contrib.postgres.fields.hstore.HStoreField(blank=True, default={}, null=True, verbose_name='Extras')),
],
options={
'verbose_name': 'Sector Type',
'verbose_name_plural': 'Sector Types',
},
),
migrations.CreateModel(
name='Theme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Theme name')),
('code', models.CharField(max_length=10, verbose_name='Theme number')),
('description', models.TextField(blank=True, verbose_name='Theme description')),
('image', models.ImageField(blank=True, null=True, upload_to='goals/themes/images', verbose_name='Image')),
('slug', models.SlugField(blank=True, verbose_name='Slug')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last modified')),
('extras', django.contrib.postgres.fields.hstore.HStoreField(blank=True, default={}, null=True, verbose_name='Extras')),
('plans', models.ManyToManyField(related_name='themes', to='goals.Plan', verbose_name='Plans')),
],
options={
'verbose_name': 'Theme',
'verbose_name_plural': 'Themes',
},
),
migrations.AlterField(
model_name='indicator',
name='target',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='indicators', to='goals.Target', verbose_name='Target'),
),
migrations.AddField(
model_name='sector',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sectors', to='goals.SectorType', verbose_name='Sector type'),
),
migrations.AddField(
model_name='indicator',
name='sector',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='indicators', to='goals.Sector', verbose_name='Sector'),
),
migrations.AddField(
model_name='indicator',
name='theme',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='indicators', to='goals.Theme', verbose_name='Theme'),
),
]
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
# Copyright: (c) 2016, Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos6_command
version_added: "2.2"
author: "Abirami N (@abirami-n)"
short_description: Run commands on remote devices running Dell OS6
description:
- Sends arbitrary commands to a Dell OS6 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos6_config) to configure Dell OS6 devices.
extends_documentation_fragment: dellos6
options:
commands:
description:
- List of commands to send to the remote dellos6 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module does not return until the condition is satisfied or
the number of retries has expired.
type: list
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
type: list
version_added: "2.2"
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
type: str
default: all
choices: [ all, any ]
version_added: "2.5"
retries:
description:
- Specifies the number of times a command should be retried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
type: int
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
type: int
default: 1
"""
EXAMPLES = """
tasks:
- name: run show version on remote devices
dellos6_command:
commands: show version
- name: run show version and check to see if output contains Dell
dellos6_command:
commands: show version
wait_for: result[0] contains Dell
- name: run multiple commands on remote nodes
dellos6_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
dellos6_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains Dell
- result[1] contains Access
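# Illustrative addition (not from the original module documentation): the
# documented match option accepts 'any', so a task of roughly this shape
# should succeed when either condition holds.
- name: run a command and wait for any one of several conditions
  dellos6_command:
    commands: show version
    wait_for:
      - result[0] contains Dell
      - result[0] contains OS6
    match: any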
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.dellos6.dellos6 import run_commands
from ansible.module_utils.network.dellos6.dellos6 import dellos6_argument_spec, check_args
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='dellos6_command does not support running config mode '
'commands. Please use dellos6_config instead'
)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list'),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(dellos6_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
})
module.exit_json(**result)
if __name__ == '__main__':
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import os.path
import six
import eventlet
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from neutron.agent.common import config as agent_cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('external_pids',
default='$state_path/external/pids',
help=_('Location to store child pid files')),
]
cfg.CONF.register_opts(OPTS)
agent_cfg.register_process_monitor_opts(cfg.CONF)
@six.add_metaclass(abc.ABCMeta)
class MonitoredProcess(object):
@abc.abstractproperty
def active(self):
"""Boolean representing the running state of the process."""
@abc.abstractmethod
def enable(self):
"""Enable the service, or respawn the process."""
class ProcessManager(MonitoredProcess):
"""An external process manager for Neutron spawned processes.
Note: The manager expects uuid to be in cmdline.
"""
def __init__(self, conf, uuid, namespace=None, service=None,
pids_path=None, default_cmd_callback=None,
cmd_addl_env=None, pid_file=None, run_as_root=False):
self.conf = conf
self.uuid = uuid
self.namespace = namespace
self.default_cmd_callback = default_cmd_callback
self.cmd_addl_env = cmd_addl_env
self.pids_path = pids_path or self.conf.external_pids
self.pid_file = pid_file
self.run_as_root = run_as_root
if service:
self.service_pid_fname = 'pid.' + service
self.service = service
else:
self.service_pid_fname = 'pid'
self.service = 'default-service'
common_utils.ensure_dir(os.path.dirname(self.get_pid_file_name()))
def enable(self, cmd_callback=None, reload_cfg=False):
if not self.active:
if not cmd_callback:
cmd_callback = self.default_cmd_callback
cmd = cmd_callback(self.get_pid_file_name())
ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
run_as_root=self.run_as_root)
elif reload_cfg:
self.reload_cfg()
def reload_cfg(self):
self.disable('HUP')
def disable(self, sig='9', get_stop_command=None):
pid = self.pid
if self.active:
if get_stop_command:
cmd = get_stop_command(self.get_pid_file_name())
ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env)
else:
cmd = ['kill', '-%s' % (sig), pid]
utils.execute(cmd, run_as_root=True)
# In the case of shutting down, remove the pid file
if sig == '9':
fileutils.delete_if_exists(self.get_pid_file_name())
elif pid:
LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring '
'signal %(signal)s', {'uuid': self.uuid, 'pid': pid,
'signal': sig})
else:
LOG.debug('No process started for %s', self.uuid)
def get_pid_file_name(self):
"""Returns the file name for a given kind of config file."""
if self.pid_file:
return self.pid_file
else:
return utils.get_conf_file_name(self.pids_path,
self.uuid,
self.service_pid_fname)
@property
def pid(self):
"""Last known pid for this external process spawned for this uuid."""
return utils.get_value_from_file(self.get_pid_file_name(), int)
@property
def active(self):
pid = self.pid
if pid is None:
return False
cmdline = '/proc/%s/cmdline' % pid
try:
with open(cmdline, "r") as f:
return self.uuid in f.readline()
except IOError:
return False
ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service'])
class ProcessMonitor(object):
def __init__(self, config, resource_type):
"""Handle multiple process managers and watch over all of them.
:param config: oslo config object with the agent configuration.
:type config: oslo_config.ConfigOpts
:param resource_type: can be dhcp, router, load_balancer, etc.
:type resource_type: str
"""
self._config = config
self._resource_type = resource_type
self._monitored_processes = {}
if self._config.AGENT.check_child_processes_interval:
self._spawn_checking_thread()
def register(self, uuid, service_name, monitored_process):
"""Start monitoring a process.
The given monitored_process will be tied to its uuid+service_name,
replacing the old one if it already existed.
The monitored_process should be enabled before registration;
otherwise ProcessMonitor could try to enable the process itself,
which could lead to a double enable and, if unlucky, two running
processes as well as errors in the logs. (A usage sketch follows
this class.)
:param uuid: An ID of the resource for which the process is running.
:param service_name: A logical service name for this process monitor,
so the same uuid provided via process manager
can reference several different services.
:param monitored_process: MonitoredProcess we want to monitor.
"""
service_id = ServiceId(uuid, service_name)
self._monitored_processes[service_id] = monitored_process
def unregister(self, uuid, service_name):
"""Stop monitoring a process.
The uuid+service_name will be removed from the monitored processes.
The service must be disabled **after** unregistering; otherwise, if the
process monitor checks after you disable the process and before you
unregister it, the process will be respawned and left orphaned in the
system.
:param uuid: An ID of the resource for which the process is running.
:param service_name: A logical service name for this process monitor,
so the same uuid provided via process manager
can reference several different services.
"""
service_id = ServiceId(uuid, service_name)
self._monitored_processes.pop(service_id, None)
def stop(self):
"""Stop the process monitoring.
This method will stop the monitoring thread, but no monitored
process will be stopped.
"""
self._monitor_processes = False
def _spawn_checking_thread(self):
self._monitor_processes = True
eventlet.spawn(self._periodic_checking_thread)
@lockutils.synchronized("_check_child_processes")
def _check_child_processes(self):
# we build the list of keys before iterating in the loop to cover
# the case where other threads add or remove items from the
# dictionary which otherwise will cause a RuntimeError
for service_id in list(self._monitored_processes):
pm = self._monitored_processes.get(service_id)
if pm and not pm.active:
LOG.error(_LE("%(service)s for %(resource_type)s "
"with uuid %(uuid)s not found. "
"The process should not have died"),
{'service': pm.service,
'resource_type': self._resource_type,
'uuid': service_id.uuid})
self._execute_action(service_id)
eventlet.sleep(0)
def _periodic_checking_thread(self):
while self._monitor_processes:
eventlet.sleep(self._config.AGENT.check_child_processes_interval)
eventlet.spawn(self._check_child_processes)
def _execute_action(self, service_id):
action = self._config.AGENT.check_child_processes_action
action_function = getattr(self, "_%s_action" % action)
action_function(service_id)
def _respawn_action(self, service_id):
LOG.error(_LE("respawning %(service)s for uuid %(uuid)s"),
{'service': service_id.service,
'uuid': service_id.uuid})
self._monitored_processes[service_id].enable()
def _exit_action(self, service_id):
LOG.error(_LE("Exiting agent as programmed in check_child_processes_"
"actions"))
self._exit_handler(service_id.uuid, service_id.service)
def _exit_handler(self, uuid, service):
"""This is an exit handler for the ProcessMonitor.
It will be called if the administrator configured the exit action in
check_child_processes_actions, and one of our external processes die
unexpectedly.
"""
LOG.error(_LE("Exiting agent because of a malfunction with the "
"%(service)s process identified by uuid %(uuid)s"),
{'service': service, 'uuid': uuid})
raise SystemExit(1)
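# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module) of the
# ordering described in register()/unregister() above. `conf`, `uuid` and
# `callback` are assumed to be supplied by the caller.
def _monitoring_sketch(conf, uuid, callback):
    """Illustrative only: enable before register, unregister before disable."""
    pm = ProcessManager(conf, uuid, default_cmd_callback=callback)
    monitor = ProcessMonitor(conf, resource_type='router')
    pm.enable()                           # start the process first ...
    monitor.register(uuid, 'router', pm)  # ... then start watching it
    # ... later, tear down in the reverse order:
    monitor.unregister(uuid, 'router')    # stop watching first ...
    pm.disable()                          # ... then stop the process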
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Periodic CSTR
This example illustrates a CSTR with steady inputs but periodic interior state.
A stoichiometric hydrogen/oxygen mixture is introduced and reacts to produce
water. But since water has a large efficiency as a third body in the chain
termination reaction
H + O2 + M = HO2 + M
as soon as a significant amount of water is produced, the reaction stops. After
enough time has passed that the water is exhausted from the reactor, the
mixture explodes again and the process repeats. This explanation can be
verified by decreasing the rate for reaction 7 in the file 'h2o2.cti' and
re-running the example. (A small post-processing sketch after the time loop
below estimates the oscillation period.)
Acknowledgments: The idea for this example and an estimate of the conditions
needed to see the oscillations came from Bob Kee, Colorado School of Mines.
import cantera as ct
import numpy as np
# create the gas mixture
gas = ct.Solution('h2o2.cti')
# pressure = 60 Torr, T = 770 K
p = 60.0*133.3
t = 770.0
gas.TPX = t, p, 'H2:2, O2:1'
# create an upstream reservoir that will supply the reactor. The temperature,
# pressure, and composition of the upstream reservoir are set to those of the
# 'gas' object at the time the reservoir is created.
upstream = ct.Reservoir(gas)
# Now create the reactor object with the same initial state
cstr = ct.IdealGasReactor(gas)
# Set its volume to 10 cm^3. In this problem, the reactor volume is fixed, so
# the initial volume is the volume at all later times.
cstr.volume = 10.0*1.0e-6
# We need to have heat loss to see the oscillations. Create a reservoir to
# represent the environment, and initialize its temperature to the reactor
# temperature.
env = ct.Reservoir(gas)
# Create a heat-conducting wall between the reactor and the environment. Set its
# area, and its overall heat transfer coefficient. Larger U causes the reactor
# to be closer to isothermal. If U is too small, the gas ignites, and the
# temperature spikes and stays high.
w = ct.Wall(cstr, env, A=1.0, U=0.02)
# Connect the upstream reservoir to the reactor with a mass flow controller
# (constant mdot). Set the mass flow rate to 1.25 sccm.
sccm = 1.25
vdot = sccm * 1.0e-6/60.0 * ((ct.one_atm / gas.P) * ( gas.T / 273.15)) # m^3/s
mdot = gas.density * vdot # kg/s
mfc = ct.MassFlowController(upstream, cstr, mdot=mdot)
# now create a downstream reservoir to exhaust into.
downstream = ct.Reservoir(gas)
# connect the reactor to the downstream reservoir with a valve, and set the
# coefficient sufficiently large to keep the reactor pressure close to the
# downstream pressure of 60 Torr.
v = ct.Valve(cstr, downstream, K=1.0e-9)
# create the network
network = ct.ReactorNet([cstr])
# now integrate in time
t = 0.0
dt = 0.1
tm = []
y = []
while t < 300.0:
t += dt
network.advance(t)
tm.append(t)
y.append(cstr.thermo['H2','O2','H2O'].Y)
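# ---------------------------------------------------------------------------
# Post-processing sketch (not part of the original example): estimate the
# oscillation period from the H2O mass-fraction trace by locating its local
# maxima. The names below are illustrative assumptions.
h2o_trace = np.array(y)[:, 2]  # H2O mass fraction at each sample time
peak_indices = [i for i in range(1, len(h2o_trace) - 1)
                if h2o_trace[i - 1] < h2o_trace[i] >= h2o_trace[i + 1]]
if len(peak_indices) > 1:
    approx_period = float(np.mean(np.diff([tm[i] for i in peak_indices])))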
if __name__ == '__main__':
print(__doc__)
try:
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(tm, y)
plt.legend(['H2','O2','H2O'])
plt.title('Mass Fractions')
plt.show()
except ImportError:
print('Matplotlib not found. Unable to plot results.')
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package observe
import (
"context"
"github.com/hashicorp/vault/sdk/logical"
)
type PluginObserve interface {
// RecordObservation is used to record observations through the plugin's observation system.
// It returns ErrNoObservations if the observation system has not been configured or enabled.
RecordObservation(ctx context.Context, observationType string, data map[string]interface{}) error
}
type PkiObserver interface {
RecordPKIObservation(ctx context.Context, req *logical.Request, observationType string, additionalMetadata ...AdditionalPKIMetadata)
}
type AdditionalPKIMetadata struct {
key string
value any
}
func NewAdditionalPKIMetadata(key string, value any) AdditionalPKIMetadata {
return AdditionalPKIMetadata{key: key, value: value}
}
|
go
|
github
|
https://github.com/hashicorp/vault
|
builtin/logical/pki/observe/observe.go
|
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2019-04-19 and added to Hugging Face Transformers on 2022-09-09.*
<div style="float: right;">
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white" >
</div>
</div>
# ERNIE
[ERNIE1.0](https://huggingface.co/papers/1904.09223), [ERNIE2.0](https://ojs.aaai.org/index.php/AAAI/article/view/6428),
[ERNIE3.0](https://huggingface.co/papers/2107.02137), [ERNIE-Gram](https://huggingface.co/papers/2010.12148), [ERNIE-health](https://huggingface.co/papers/2110.07244) are a series of powerful models proposed by Baidu that are particularly strong on Chinese tasks.
ERNIE (Enhanced Representation through kNowledge IntEgration) is designed to learn language representation enhanced by knowledge masking strategies, which include entity-level masking and phrase-level masking.
Other ERNIE models released by Baidu can be found at [Ernie 4.5](./ernie4_5) and [Ernie 4.5 MoE](./ernie4_5_moe).
> [!TIP]
> This model was contributed by [nghuyong](https://huggingface.co/nghuyong), and the official code can be found in [PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP) (in PaddlePaddle).
>
> Click on the ERNIE models in the right sidebar for more examples of how to apply ERNIE to different language tasks.
The example below demonstrates how to predict the `[MASK]` token with [`Pipeline`], [`AutoModel`], and from the command line.
<hfoptions id="usage">
<hfoption id="Pipeline">
```py
from transformers import pipeline
pipeline = pipeline(
task="fill-mask",
model="nghuyong/ernie-3.0-xbase-zh"
)
pipeline("巴黎是[MASK]国的首都。")
```
</hfoption>
<hfoption id="AutoModel">
```py
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
"nghuyong/ernie-3.0-xbase-zh",
)
model = AutoModelForMaskedLM.from_pretrained(
"nghuyong/ernie-3.0-xbase-zh",
dtype=torch.float16,
device_map="auto"
)
inputs = tokenizer("巴黎是[MASK]国的首都。", return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
predictions = outputs.logits
masked_index = torch.where(inputs['input_ids'] == tokenizer.mask_token_id)[1]
predicted_token_id = predictions[0, masked_index].argmax(dim=-1)
predicted_token = tokenizer.decode(predicted_token_id)
print(f"The predicted token is: {predicted_token}")
```
</hfoption>
<hfoption id="transformers CLI">
```bash
echo -e "巴黎是[MASK]国的首都。" | transformers run --task fill-mask --model nghuyong/ernie-3.0-xbase-zh --device 0
```
</hfoption>
</hfoptions>
## Notes
Model variants are available in different sizes and languages.
| Model Name | Language | Description |
|:-------------------:|:--------:|:-------------------------------:|
| ernie-1.0-base-zh | Chinese | Layer:12, Heads:12, Hidden:768 |
| ernie-2.0-base-en | English | Layer:12, Heads:12, Hidden:768 |
| ernie-2.0-large-en | English | Layer:24, Heads:16, Hidden:1024 |
| ernie-3.0-base-zh | Chinese | Layer:12, Heads:12, Hidden:768 |
| ernie-3.0-medium-zh | Chinese | Layer:6, Heads:12, Hidden:768 |
| ernie-3.0-mini-zh | Chinese | Layer:6, Heads:12, Hidden:384 |
| ernie-3.0-micro-zh | Chinese | Layer:4, Heads:12, Hidden:384 |
| ernie-3.0-nano-zh | Chinese | Layer:4, Heads:12, Hidden:312 |
| ernie-health-zh | Chinese | Layer:12, Heads:12, Hidden:768 |
| ernie-gram-zh | Chinese | Layer:12, Heads:12, Hidden:768 |
## Resources
You can find all the supported models on Hugging Face's model hub: [huggingface.co/nghuyong](https://huggingface.co/nghuyong), and model details in PaddlePaddle's official
repo: [PaddleNLP](https://paddlenlp.readthedocs.io/zh/latest/model_zoo/transformers/ERNIE/contents.html)
and [ERNIE's legacy branch](https://github.com/PaddlePaddle/ERNIE/tree/legacy/develop).
## ErnieConfig
[[autodoc]] ErnieConfig
- all
## Ernie specific outputs
[[autodoc]] models.ernie.modeling_ernie.ErnieForPreTrainingOutput
## ErnieModel
[[autodoc]] ErnieModel
- forward
## ErnieForPreTraining
[[autodoc]] ErnieForPreTraining
- forward
## ErnieForCausalLM
[[autodoc]] ErnieForCausalLM
- forward
## ErnieForMaskedLM
[[autodoc]] ErnieForMaskedLM
- forward
## ErnieForNextSentencePrediction
[[autodoc]] ErnieForNextSentencePrediction
- forward
## ErnieForSequenceClassification
[[autodoc]] ErnieForSequenceClassification
- forward
## ErnieForMultipleChoice
[[autodoc]] ErnieForMultipleChoice
- forward
## ErnieForTokenClassification
[[autodoc]] ErnieForTokenClassification
- forward
## ErnieForQuestionAnswering
[[autodoc]] ErnieForQuestionAnswering
- forward
|
unknown
|
github
|
https://github.com/huggingface/transformers
|
docs/source/en/model_doc/ernie.md
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cl_ports
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure Cumulus Switch port attributes (ports.conf)
description:
- Set the initial port attributes defined in the Cumulus Linux ports.conf
file. This module does not do any error checking at the moment. Be careful
not to include ports that do not exist on the switch. Carefully read the
original ports.conf file for any exceptions or limitations.
For more details go to the Configure Switch Port Attribute Documentation at
U(http://docs.cumulusnetworks.com).
options:
speed_10g:
description:
- List of ports to initially run at 10G.
speed_40g:
description:
- List of ports to initially run at 40G.
speed_4_by_10g:
description:
- List of 40G ports that will be unganged to run as 4 10G ports.
speed_40g_div_4:
description:
- List of 10G ports that will be ganged to form a 40G port.
'''
EXAMPLES = '''
# Use cl_ports module to manage the switch attributes defined in the
# ports.conf file on Cumulus Linux
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1
- swp32
speed_40g:
- swp2-31
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1-3
- swp6
speed_40g:
- swp4-5
- swp7-32
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
PORTS_CONF = '/etc/cumulus/ports.conf'
def hash_existing_ports_conf(module):
module.ports_conf_hash = {}
if not os.path.exists(PORTS_CONF):
return False
try:
existing_ports_conf = open(PORTS_CONF).readlines()
except IOError:
error_msg = get_exception()
_msg = "Failed to open %s: %s" % (PORTS_CONF, error_msg)
module.fail_json(msg=_msg)
return # for testing only should return on module.fail_json
for _line in existing_ports_conf:
_m0 = re.match(r'^(\d+)=(\w+)', _line)
if _m0:
_portnum = int(_m0.group(1))
_speed = _m0.group(2)
module.ports_conf_hash[_portnum] = _speed
def generate_new_ports_conf_hash(module):
new_ports_conf_hash = {}
convert_hash = {
'speed_40g_div_4': '40G/4',
'speed_4_by_10g': '4x10G',
'speed_10g': '10G',
'speed_40g': '40G'
}
for k in module.params.keys():
port_range = module.params[k]
port_setting = convert_hash[k]
if port_range:
port_range = [x for x in port_range if x]
for port_str in port_range:
port_range_str = port_str.replace('swp', '').split('-')
if len(port_range_str) == 1:
new_ports_conf_hash[int(port_range_str[0])] = \
port_setting
else:
int_range = map(int, port_range_str)
portnum_range = range(int_range[0], int_range[1]+1)
for i in portnum_range:
new_ports_conf_hash[i] = port_setting
module.new_ports_hash = new_ports_conf_hash
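# Worked illustration (assumed behaviour, not taken from the original module
# docs): with speed_4_by_10g=['swp1-3', 'swp6'] and speed_40g=['swp4-5'],
# the function above builds
# {1: '4x10G', 2: '4x10G', 3: '4x10G', 4: '40G', 5: '40G', 6: '4x10G'}.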
def compare_new_and_old_port_conf_hash(module):
ports_conf_hash_copy = module.ports_conf_hash.copy()
module.ports_conf_hash.update(module.new_ports_hash)
port_num_length = len(module.ports_conf_hash.keys())
orig_port_num_length = len(ports_conf_hash_copy.keys())
if port_num_length != orig_port_num_length:
module.fail_json(msg="Port numbering is wrong. \
Too many or two few ports configured")
return False
elif ports_conf_hash_copy == module.ports_conf_hash:
return False
return True
def make_copy_of_orig_ports_conf(module):
if os.path.exists(PORTS_CONF + '.orig'):
return
try:
shutil.copyfile(PORTS_CONF, PORTS_CONF + '.orig')
except IOError:
error_msg = get_exception()
_msg = "Failed to save the original %s: %s" % (PORTS_CONF, error_msg)
module.fail_json(msg=_msg)
return # for testing only
def write_to_ports_conf(module):
"""
use tempfile to first write out config in temp file
then write to actual location. may help prevent file
corruption. Ports.conf is a critical file for Cumulus.
Don't want to corrupt this file under any circumstance.
"""
temp = tempfile.NamedTemporaryFile()
try:
try:
temp.write('# Managed By Ansible\n')
for k in sorted(module.ports_conf_hash.keys()):
port_setting = module.ports_conf_hash[k]
_str = "%s=%s\n" % (k, port_setting)
temp.write(_str)
temp.seek(0)
shutil.copyfile(temp.name, PORTS_CONF)
except IOError:
error_msg = get_exception()
module.fail_json(
msg="Failed to write to %s: %s" % (PORTS_CONF, error_msg))
finally:
temp.close()
def main():
module = AnsibleModule(
argument_spec=dict(
speed_40g_div_4=dict(type='list'),
speed_4_by_10g=dict(type='list'),
speed_10g=dict(type='list'),
speed_40g=dict(type='list')
),
required_one_of=[['speed_40g_div_4',
'speed_4_by_10g',
'speed_10g',
'speed_40g']]
)
_changed = False
hash_existing_ports_conf(module)
generate_new_ports_conf_hash(module)
if compare_new_and_old_port_conf_hash(module):
make_copy_of_orig_ports_conf(module)
write_to_ports_conf(module)
_changed = True
_msg = "/etc/cumulus/ports.conf changed"
else:
_msg = 'No change in /etc/ports.conf'
module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
# from ansible.module_utils.urls import *
import os
import tempfile
import shutil
if __name__ == '__main__':
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import time
from tokens import GetTokens
from slackclient import SlackClient
from card_search import CardFinder
token = GetTokens().slack_bot() ## custom class to hide token
client = SlackClient(token)
card_name = None
if client.rtm_connect():
while True:
last_read = client.rtm_read()
if last_read:
try:
parsed = last_read[0]['text']
message_channel = last_read[0]['channel']
if parsed and parsed[0] == '!':
print(parsed)
client.rtm_send_message(message_channel, 'searching...')
with CardFinder(parsed[1:], 'online') as cf:
card_name = cf
if len(card_name) > 0:
multiverse_id = card_name['multiverseid']
url_prefix = 'http://gatherer.wizards.com/Handlers/Image.ashx?multiverseid=%s&type=card'
message_string = url_prefix % multiverse_id
client.rtm_send_message(message_channel, '%s' % (message_string))
else:
client.rtm_send_message(message_channel, 'sorry, no match found!')
print('found: ', card_name)
## ~/ clean up
parsed = None
card_name = None
except:
pass
time.sleep(1)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#ifndef RBIMPL_INTERN_VM_H /*-*-C++-*-vi:se ft=cpp:*/
#define RBIMPL_INTERN_VM_H
/**
* @file
* @author Ruby developers <ruby-core@ruby-lang.org>
* @copyright This file is a part of the programming language Ruby.
* Permission is hereby granted, to either redistribute and/or
* modify this file, provided that the conditions mentioned in the
* file COPYING are met. Consult the file for details.
* @warning Symbols prefixed with either `RBIMPL` or `rbimpl` are
* implementation details. Don't take them as canon. They could
* rapidly appear then vanish. The name (path) of this header file
* is also an implementation detail. Do not expect it to persist
* at the place it is now. Developers are free to move it anywhere
* anytime at will.
* @note To ruby-core: remember that this header can be possibly
* recursively included from extension libraries written in C++.
* Do not expect for instance `__VA_ARGS__` is always available.
* We assume C99 for ruby itself but we don't assume languages of
* extension libraries. They could be written in C++98.
* @brief Public APIs related to rb_cRubyVM.
*/
#include "ruby/internal/attr/nonnull.h"
#include "ruby/internal/attr/noreturn.h"
#include "ruby/internal/dllexport.h"
#include "ruby/internal/value.h"
RBIMPL_SYMBOL_EXPORT_BEGIN()
/* vm.c */
/**
* Resembles `__LINE__`.
*
* @retval 0 Current execution context not in a ruby method.
* @retval otherwise The current line number of the current thread of the
* current ractor of the current execution context.
*/
int rb_sourceline(void);
/**
* Resembles `__FILE__`.
*
* @retval 0 Current execution context not in a ruby method.
* @retval otherwise The current source path of the current thread of the
* current ractor of the current execution context.
* @note This may or may not be an absolute path.
*/
const char *rb_sourcefile(void);
/**
* Resembles `__method__`.
*
* @param[out] idp Return buffer for method id.
* @param[out] klassp Return buffer for class.
* @retval 0 Current execution context not in a method.
* @retval 1 Successful return.
* @post Upon successful return `*idp` and `*klassp` are updated to have
* the current method name and its defined class respectively.
* @note Both parameters can be `NULL`.
*/
int rb_frame_method_id_and_class(ID *idp, VALUE *klassp);
/* vm_eval.c */
/**
* Identical to rb_funcallv(), except it returns ::RUBY_Qundef instead of
* raising ::rb_eNoMethodError.
*
* @param[in,out] recv Receiver of the method.
* @param[in] mid Name of the method to call.
* @param[in] argc Number of arguments.
* @param[in] argv Arbitrary number of method arguments.
* @retval RUBY_Qundef `recv` doesn't respond to `mid`.
* @retval otherwise What the method evaluates to.
*/
VALUE rb_check_funcall(VALUE recv, ID mid, int argc, const VALUE *argv);
/**
* Identical to rb_check_funcall(), except you can specify how to handle the
* last element of the given array. It can also be seen as a routine identical
* to rb_funcallv_kw(), except it returns ::RUBY_Qundef instead of raising
* ::rb_eNoMethodError.
*
* @param[in,out] recv Receiver of the method.
* @param[in] mid Name of the method to call.
* @param[in] argc Number of arguments.
* @param[in] argv Arbitrary number of method arguments.
* @param[in] kw_splat Handling of keyword parameters:
* - RB_NO_KEYWORDS `argv`'s last is not a keyword argument.
* - RB_PASS_KEYWORDS `argv`'s last is a keyword argument.
* - RB_PASS_CALLED_KEYWORDS Pass keyword arguments if the current method
* was called with keyword arguments.
* @retval RUBY_Qundef `recv` doesn't respond to `mid`.
* @retval otherwise What the method evaluates to.
*/
VALUE rb_check_funcall_kw(VALUE recv, ID mid, int argc, const VALUE *argv, int kw_splat);
/**
* This API is practically a variant of rb_proc_call_kw() now. Historically
* when there still was a concept called `$SAFE`, this was an API for that.
* But we no longer have that. This function basically ended its role. It
* just remains here because of no harm.
*
* @param[in] cmd A string, or something callable.
* @param[in] arg Argument passed to the call.
* @param[in] kw_splat Handling of keyword parameters:
* - RB_NO_KEYWORDS `arg`'s last is not a keyword argument.
* - RB_PASS_KEYWORDS `arg`'s last is a keyword argument.
* - RB_PASS_CALLED_KEYWORDS Pass keyword arguments if the current method
* was called with keyword arguments.
* @return What the command evaluates to.
*/
RBIMPL_ATTR_DEPRECATED_INTERNAL(4.0)
VALUE rb_eval_cmd_kw(VALUE cmd, VALUE arg, int kw_splat);
/**
* Identical to rb_funcallv(), except it takes Ruby's array instead of C's.
* @param[in,out] recv Receiver of the method.
* @param[in] mid Name of the method to call.
* @param[in] args An instance of ::RArray.
* @exception rb_eNoMethodError No such method.
* @exception rb_eException Any exceptions happen inside.
* @return What the method evaluates to.
* @pre `args` must be an ::RArray. Call `to_ary` beforehand when
* necessary.
*/
VALUE rb_apply(VALUE recv, ID mid, VALUE args);
/**
* Evaluates a string containing Ruby source code, or the given block, within
* the context of the receiver. In order to set the context, the variable
* `self` is set to `recv` while the code is executing, giving the code access
* to `recv`'s instance variables and private methods.
*
* When given a block, `recv` is also passed in as the block's only argument.
*
* When given a string, the optional second and third parameters supply a
* filename and starting line number that are used when reporting compilation
* errors.
*
* @param[in] argc Number of objects in `argv`
* @param[in] argv C array of 0 up to 3 elements.
* @param[in] recv The object in question.
* @return What was evaluated.
*/
VALUE rb_obj_instance_eval(int argc, const VALUE *argv, VALUE recv);
/**
* Executes the given block within the context of the receiver. In order to
* set the context, the variable `self` is set to `recv` while the code is
* executing, giving the code access to `recv`'s instance variables. Arguments
* are passed as block parameters.
*
* @param[in] argc Number of objects in `argv`
* @param[in] argv Arbitrary parameters to be passed to the block.
* @param[in] recv The object in question.
* @return What was evaluated.
* @note Don't confuse this with rb_obj_instance_eval(). The key
* difference is whether you can pass arbitrary parameters to the
* block, like this:
*
* ```ruby
* class Foo
* def initialize
* @foo = 5
* end
* end
* Foo.new.instance_exec(7) {|i| @foo + i } # => 12
* ```
*/
VALUE rb_obj_instance_exec(int argc, const VALUE *argv, VALUE recv);
/**
* Identical to rb_obj_instance_eval(), except it evaluates within the context
* of module.
*
* @param[in] argc Number of objects in `argv`
* @param[in] argv C array of 0 up to 3 elements.
* @param[in] mod The module in question.
* @pre `mod` must be a Module.
* @return What was evaluated.
*/
VALUE rb_mod_module_eval(int argc, const VALUE *argv, VALUE mod);
/**
* Identical to rb_obj_instance_exec(), except it evaluates within the context
* of module.
*
* @param[in] argc Number of objects in `argv`
* @param[in] argv Arbitrary parameters to be passed to the block.
* @param[in] mod The module in question.
* @pre `mod` must be a Module.
* @return What was evaluated.
*/
VALUE rb_mod_module_exec(int argc, const VALUE *argv, VALUE mod);
/* vm_method.c */
/**
* @private
*
* @deprecated This macro once was a thing in the old days, but makes no sense
* any longer today. Exists here for backwards compatibility
* only. You can safely forget about it.
*/
#define HAVE_RB_DEFINE_ALLOC_FUNC 1
/**
* This is the type of functions that ruby calls when trying to allocate an
* object. It is sometimes necessary to allocate extra memory regions for an
* object. When you define a class that uses ::RTypedData, it is typically the
* case. On such situations define a function of this type and pass it to
* rb_define_alloc_func().
*
* @param[in] klass The class that this function is registered.
* @return A newly allocated instance of `klass`.
*/
typedef VALUE (*rb_alloc_func_t)(VALUE klass);
/**
* Sets the allocator function of a class.
*
* @param[out] klass The class to modify.
* @param[in] func An allocator function for the class.
* @pre `klass` must be an instance of Class.
*/
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func);
/**
* Deletes the allocator function of a class. It is sometimes desirable to
* restrict creation of an instance of a class. For example it rarely makes
* sense for a DB adaptor class to allow programmers creating DB row objects
* without querying the DB itself. You can kill sporadic creation of such
* objects then, by nullifying the allocator function using this API.
*
* @param[out] klass The class to modify.
* @pre `klass` must be an instance of Class.
*/
void rb_undef_alloc_func(VALUE klass);
/**
* Queries the allocator function of a class.
*
* @param[in] klass The class in question.
* @pre `klass` must be an instance of Class.
* @retval 0 No allocator function is registered.
* @retval otherwise The allocator function.
*
* @internal
*
* Who cares? @shyouhei finds no practical usage of the return value. Maybe we
* need KonMari.
*/
rb_alloc_func_t rb_get_alloc_func(VALUE klass);
/**
* Clears the inline constant caches associated with a particular ID. Extension
* libraries should not bother with such things. Just forget about this API (or
* even, the presence of constant caches).
*/
void rb_clear_constant_cache_for_id(ID id);
/**
* Resembles `alias`.
*
* @param[out] klass Where to define an alias.
* @param[in] dst New name.
* @param[in] src Existing name.
* @exception rb_eTypeError `klass` is not a class.
* @exception rb_eFrozenError `klass` is frozen.
* @exception rb_eNameError No such method named `src`.
* @post `klass` has a method named `dst`, which is the identical to its
* method named `src`.
*/
void rb_alias(VALUE klass, ID dst, ID src);
/**
* This function resembles now-deprecated `Module#attr`.
*
* @param[out] klass Where to define an attribute.
* @param[in] name Name of an instance variable.
* @param[in] need_reader Whether attr_reader is needed.
* @param[in] need_writer Whether attr_writer is needed.
* @param[in] honour_visibility Whether to use the current visibility.
* @exception rb_eTypeError `klass` is not a class.
* @exception rb_eFrozenError `klass` is frozen.
* @post If `need_reader` is set `klass` has a method named `name`.
* @post If `need_writer` is set `klass` has a method named `name=`.
*
* @internal
*
* The three `int` arguments should have been bool, but there was no such thing
* like a bool when K&R was used in this project.
*/
void rb_attr(VALUE klass, ID name, int need_reader, int need_writer, int honour_visibility);
RBIMPL_ATTR_NONNULL(())
/**
* Removes a method. Don't confuse this to rb_undef_method(), which doesn't
* remove a method. This one resembles `Module#remove_method`.
*
* @param[out] klass The class to remove a method.
* @param[in] name Name of a method to be removed.
* @exception rb_eTypeError `klass` is a non-module.
* @exception rb_eFrozenError `klass` is frozen.
* @exception rb_eNameError No such method.
* @see rb_undef_method
*/
void rb_remove_method(VALUE klass, const char *name);
/**
* Identical to rb_remove_method(), except it accepts the method name as ::ID.
*
* @param[out] klass The class to remove a method.
* @param[in] mid Name of a method to be removed.
* @exception rb_eTypeError `klass` is a non-module.
* @exception rb_eFrozenError `klass` is frozen.
* @exception rb_eNameError No such method.
* @see rb_undef
*/
void rb_remove_method_id(VALUE klass, ID mid);
/**
* Queries if the klass has this method. This function has only one line of
* document in the implementation that states "// deprecated". Don't know what
* that means though.
*
* @param[in] klass The class in question.
* @param[in] id The method name to query.
* @param[in] ex Undocumented magic value.
* @retval false Method not found.
* @retval true There is a method.
* @pre `klass` must be a module.
*
* @internal
*
* @shyouhei has no motivation to describe what should be passed to `ex`. It
* seems this function should just be trashed.
*/
int rb_method_boundp(VALUE klass, ID id, int ex);
/**
* Well... Let us hesitate from describing what a "basic definition" is. This
* nuanced concept should have been kept private. Just please. Don't touch
* it. This function is a badly distributed random number generator. Right?
*
* @param[in] klass The class in question.
* @param[in] mid The method name in question.
* @retval 1 It is.
* @retval 0 It isn't.
*/
int rb_method_basic_definition_p(VALUE klass, ID mid);
/**
* Identical to rb_respond_to(), except it additionally takes the visibility
* parameter. This does not make difference unless the object has
* `respond_to?` undefined, but has `respond_to_missing?` defined. That case
* the passed argument becomes the second argument of `respond_to_missing?`.
*
* @param[in] obj The object in question.
* @param[in] mid The method name in question.
* @param[in] private_p This is the second argument of `obj`'s
* `respond_to_missing?`.
* @retval 1 Yes it does.
* @retval 0 No it doesn't.
*/
int rb_obj_respond_to(VALUE obj, ID mid, int private_p);
/**
* Queries if the object responds to the method. This involves calling the
* object's `respond_to?` method.
*
* @param[in] obj The object in question.
* @param[in] mid The method name in question.
* @retval 1 Yes it does.
* @retval 0 No it doesn't.
*/
int rb_respond_to(VALUE obj, ID mid);
RBIMPL_ATTR_NORETURN()
/**
* Raises ::rb_eNotImpError. This function is used as an argument to
* rb_define_method() etc.
*
* ```CXX
* rb_define_method(rb_cFoo, "foo", rb_f_notimplement, -1);
* ```
*
* @param argc Unused parameter.
* @param argv Unused parameter.
* @param obj Unused parameter.
* @param marker Unused parameter.
* @exception rb_eNotImpError Always.
* @return Never returns.
*
* @internal
*
* See also the Q&A section of include/ruby/internal/anyargs.h.
*/
VALUE rb_f_notimplement(int argc, const VALUE *argv, VALUE obj, VALUE marker);
#if !defined(RUBY_EXPORT) && defined(_WIN32)
RUBY_EXTERN VALUE (*const rb_f_notimplement_)(int, const VALUE *, VALUE, VALUE marker);
#define rb_f_notimplement (*rb_f_notimplement_)
#endif
/* vm_backtrace.c */
/**
* Prints the backtrace out to the standard error. This just confuses people
* for no reason. Evil souls must only use it.
*
* @internal
*
* Actually it is very useful when called from an interactive GDB session.
*/
void rb_backtrace(void);
/**
* Creates the good old fashioned array-of-strings style backtrace info.
*
* @return An array which contains strings, which are the textual
* representations of the backtrace locations of the current thread of
* the current ractor of the current execution context.
* @note Ruby scripts can access more sophisticated
* `Thread::Backtrace::Location`. But it seems there is no way for C
* extensions to use that API.
*/
VALUE rb_make_backtrace(void);
RBIMPL_SYMBOL_EXPORT_END()
#endif /* RBIMPL_INTERN_VM_H */
|
c
|
github
|
https://github.com/ruby/ruby
|
include/ruby/internal/intern/vm.h
|
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.cli.command.shell;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.ServiceLoader;
import java.util.Set;
import jline.console.ConsoleReader;
import jline.console.completer.CandidateListCompletionHandler;
import org.fusesource.jansi.AnsiRenderer.Code;
import org.jspecify.annotations.Nullable;
import org.springframework.boot.cli.command.Command;
import org.springframework.boot.cli.command.CommandFactory;
import org.springframework.boot.cli.command.CommandRunner;
import org.springframework.boot.cli.command.core.HelpCommand;
import org.springframework.boot.cli.command.core.VersionCommand;
import org.springframework.boot.loader.tools.SignalUtils;
import org.springframework.util.StringUtils;
/**
* A shell for Spring Boot. Drops the user into an event loop (REPL) where command line
* completion and history are available without relying on OS shell features.
*
* @author Jon Brisbin
* @author Dave Syer
* @author Phillip Webb
* @since 1.0.0
*/
public class Shell {
private static final Set<Class<?>> NON_FORKED_COMMANDS;
static {
Set<Class<?>> nonForked = new HashSet<>();
nonForked.add(VersionCommand.class);
NON_FORKED_COMMANDS = Collections.unmodifiableSet(nonForked);
}
private final ShellCommandRunner commandRunner;
private final ConsoleReader consoleReader;
private final EscapeAwareWhiteSpaceArgumentDelimiter argumentDelimiter = new EscapeAwareWhiteSpaceArgumentDelimiter();
private final ShellPrompts prompts = new ShellPrompts();
/**
* Create a new {@link Shell} instance.
* @throws IOException in case of I/O errors
*/
Shell() throws IOException {
attachSignalHandler();
this.consoleReader = new ConsoleReader();
this.commandRunner = createCommandRunner();
initializeConsoleReader();
}
private ShellCommandRunner createCommandRunner() {
ShellCommandRunner runner = new ShellCommandRunner();
runner.addCommand(new HelpCommand(runner));
runner.addCommands(getCommands());
runner.addAliases("exit", "quit");
runner.addAliases("help", "?");
runner.addAliases("clear", "cls");
return runner;
}
private Iterable<Command> getCommands() {
List<Command> commands = new ArrayList<>();
ServiceLoader<CommandFactory> factories = ServiceLoader.load(CommandFactory.class, getClass().getClassLoader());
for (CommandFactory factory : factories) {
for (Command command : factory.getCommands()) {
commands.add(convertToForkCommand(command));
}
}
commands.add(new PromptCommand(this.prompts));
commands.add(new ClearCommand(this.consoleReader));
commands.add(new ExitCommand());
return commands;
}
private Command convertToForkCommand(Command command) {
for (Class<?> nonForked : NON_FORKED_COMMANDS) {
if (nonForked.isInstance(command)) {
return command;
}
}
return new ForkProcessCommand(command);
}
private void initializeConsoleReader() {
this.consoleReader.setHistoryEnabled(true);
this.consoleReader.setBellEnabled(false);
this.consoleReader.setExpandEvents(false);
this.consoleReader
.addCompleter(new CommandCompleter(this.consoleReader, this.argumentDelimiter, this.commandRunner));
this.consoleReader.setCompletionHandler(new CandidateListCompletionHandler());
}
private void attachSignalHandler() {
SignalUtils.attachSignalHandler(this::handleSigInt);
}
/**
	 * Run the shell until the user exits.
* @throws Exception on error
*/
public void run() throws Exception {
printBanner();
try {
runInputLoop();
}
catch (Exception ex) {
if (!(ex instanceof ShellExitException)) {
throw ex;
}
}
}
private void printBanner() {
String version = getClass().getPackage().getImplementationVersion();
version = (version != null) ? " (v" + version + ")" : "";
System.out.println(ansi("Spring Boot", Code.BOLD).append(version, Code.FAINT));
System.out.println(ansi("Hit TAB to complete. Type 'help' and hit RETURN for help, and 'exit' to quit."));
}
private void runInputLoop() throws Exception {
String line;
while ((line = this.consoleReader.readLine(getPrompt())) != null) {
while (line.endsWith("\\")) {
line = line.substring(0, line.length() - 1);
line += this.consoleReader.readLine("> ");
}
if (StringUtils.hasLength(line)) {
String[] args = this.argumentDelimiter.parseArguments(line);
this.commandRunner.runAndHandleErrors(args);
}
}
}
private String getPrompt() {
String prompt = this.prompts.getPrompt();
return ansi(prompt, Code.FG_BLUE).toString();
}
private AnsiString ansi(String text, Code... codes) {
return new AnsiString(this.consoleReader.getTerminal()).append(text, codes);
}
/**
	 * Handle an interrupt signal (CTRL-C).
*/
protected void handleSigInt() {
if (this.commandRunner.handleSigInt()) {
return;
}
System.out.println(String.format("%nThanks for using Spring Boot"));
System.exit(1);
}
/**
* Extension of {@link CommandRunner} to deal with {@link RunProcessCommand}s and
* aliases.
*/
private static class ShellCommandRunner extends CommandRunner {
private volatile @Nullable Command lastCommand;
private final Map<String, String> aliases = new HashMap<>();
ShellCommandRunner() {
super(null);
}
void addAliases(String command, String... aliases) {
for (String alias : aliases) {
this.aliases.put(alias, command);
}
}
@Override
public @Nullable Command findCommand(String name) {
if (name.startsWith("!")) {
return new RunProcessCommand(name.substring(1));
}
if (this.aliases.containsKey(name)) {
name = this.aliases.get(name);
}
return super.findCommand(name);
}
@Override
protected void beforeRun(Command command) {
this.lastCommand = command;
}
@Override
protected void afterRun(Command command) {
}
boolean handleSigInt() {
Command command = this.lastCommand;
if (command instanceof RunProcessCommand runProcessCommand) {
return runProcessCommand.handleSigInt();
}
return false;
}
}
}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/shell/Shell.java
|
"""
Very minimal unittests for parts of the readline module.
"""
import codecs
import locale
import os
import sys
import tempfile
import textwrap
import threading
import unittest
from test import support
from test.support import threading_helper
from test.support import verbose
from test.support.import_helper import import_module
from test.support.os_helper import unlink, temp_dir, TESTFN
from test.support.pty_helper import run_pty
from test.support.script_helper import assert_python_ok
from test.support.threading_helper import requires_working_threading
# Skip tests if there is no readline module
readline = import_module('readline')
if hasattr(readline, "_READLINE_LIBRARY_VERSION"):
is_editline = ("EditLine wrapper" in readline._READLINE_LIBRARY_VERSION)
else:
is_editline = readline.backend == "editline"
def setUpModule():
if verbose:
# Python implementations other than CPython may not have
# these private attributes
if hasattr(readline, "_READLINE_VERSION"):
print(f"readline version: {readline._READLINE_VERSION:#x}")
print(f"readline runtime version: {readline._READLINE_RUNTIME_VERSION:#x}")
if hasattr(readline, "_READLINE_LIBRARY_VERSION"):
print(f"readline library version: {readline._READLINE_LIBRARY_VERSION!r}")
print(f"use libedit emulation? {is_editline}")
@unittest.skipUnless(hasattr(readline, "clear_history"),
"The history update test cannot be run because the "
"clear_history method is not available.")
class TestHistoryManipulation (unittest.TestCase):
"""
These tests were added to check that the libedit emulation on OSX and the
"real" readline have the same interface for history manipulation. That's
why the tests cover only a small subset of the interface.
"""
def testHistoryUpdates(self):
readline.clear_history()
readline.add_history("first line")
readline.add_history("second line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
readline.replace_history_item(0, "replaced line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "replaced line")
self.assertEqual(readline.get_history_item(2), "second line")
self.assertEqual(readline.get_current_history_length(), 2)
readline.remove_history_item(0)
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "second line")
self.assertEqual(readline.get_current_history_length(), 1)
@unittest.skipUnless(hasattr(readline, "append_history_file"),
"append_history not available")
def test_write_read_append(self):
hfile = tempfile.NamedTemporaryFile(delete=False)
hfile.close()
hfilename = hfile.name
self.addCleanup(unlink, hfilename)
# test write-clear-read == nop
readline.clear_history()
readline.add_history("first line")
readline.add_history("second line")
readline.write_history_file(hfilename)
readline.clear_history()
self.assertEqual(readline.get_current_history_length(), 0)
readline.read_history_file(hfilename)
self.assertEqual(readline.get_current_history_length(), 2)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
# test append
readline.append_history_file(1, hfilename)
readline.clear_history()
readline.read_history_file(hfilename)
self.assertEqual(readline.get_current_history_length(), 3)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
self.assertEqual(readline.get_history_item(3), "second line")
# test 'no such file' behaviour
os.unlink(hfilename)
try:
readline.append_history_file(1, hfilename)
except FileNotFoundError:
pass # Some implementations return this error (libreadline).
else:
os.unlink(hfilename) # Some create it anyways (libedit).
# If the file wasn't created, unlink will fail.
# We're just testing that one of the two expected behaviors happens
# instead of an incorrect error.
# write_history_file can create the target
readline.write_history_file(hfilename)
# Negative values should be disallowed
with self.assertRaises(ValueError):
readline.append_history_file(-42, hfilename)
# See gh-122431, using the minimum signed integer value caused a segfault
with self.assertRaises(ValueError):
readline.append_history_file(-2147483648, hfilename)
def test_nonascii_history(self):
readline.clear_history()
try:
readline.add_history("entrée 1")
except UnicodeEncodeError as err:
self.skipTest("Locale cannot encode test data: " + format(err))
readline.add_history("entrée 2")
readline.replace_history_item(1, "entrée 22")
readline.write_history_file(TESTFN)
self.addCleanup(os.remove, TESTFN)
readline.clear_history()
readline.read_history_file(TESTFN)
if is_editline:
# An add_history() call seems to be required for get_history_
# item() to register items from the file
readline.add_history("dummy")
self.assertEqual(readline.get_history_item(1), "entrée 1")
self.assertEqual(readline.get_history_item(2), "entrée 22")
def test_write_read_limited_history(self):
previous_length = readline.get_history_length()
self.addCleanup(readline.set_history_length, previous_length)
readline.clear_history()
readline.add_history("first line")
readline.add_history("second line")
readline.add_history("third line")
readline.set_history_length(2)
self.assertEqual(readline.get_history_length(), 2)
readline.write_history_file(TESTFN)
self.addCleanup(os.remove, TESTFN)
readline.clear_history()
self.assertEqual(readline.get_current_history_length(), 0)
self.assertEqual(readline.get_history_length(), 2)
readline.read_history_file(TESTFN)
self.assertEqual(readline.get_history_item(1), "second line")
self.assertEqual(readline.get_history_item(2), "third line")
self.assertEqual(readline.get_history_item(3), None)
# Readline seems to report an additional history element.
self.assertIn(readline.get_current_history_length(), (2, 3))
class TestReadline(unittest.TestCase):
@unittest.skipIf(readline._READLINE_VERSION < 0x0601 and not is_editline,
"not supported in this library version")
def test_init(self):
# Issue #19884: Ensure that the ANSI sequence "\033[1034h" is not
# written into stdout when the readline module is imported and stdout
# is redirected to a pipe.
rc, stdout, stderr = assert_python_ok('-c', 'import readline',
TERM='xterm-256color')
self.assertEqual(stdout, b'')
def test_backend(self):
self.assertIn(readline.backend, ("readline", "editline"))
auto_history_script = """\
import readline
readline.set_auto_history({})
input()
print("History length:", readline.get_current_history_length())
"""
def test_auto_history_enabled(self):
output = run_pty(self.auto_history_script.format(True))
# bpo-44949: Sometimes, the newline character is not written at the
# end, so don't expect it in the output.
self.assertIn(b"History length: 1", output)
def test_auto_history_disabled(self):
output = run_pty(self.auto_history_script.format(False))
# bpo-44949: Sometimes, the newline character is not written at the
# end, so don't expect it in the output.
self.assertIn(b"History length: 0", output)
def test_set_complete_delims(self):
script = textwrap.dedent("""
import readline
def complete(text, state):
if state == 0 and text == "$":
return "$complete"
return None
if readline.backend == "editline":
readline.parse_and_bind(r'bind "\\t" rl_complete')
else:
readline.parse_and_bind(r'"\\t": complete')
readline.set_completer_delims(" \\t\\n")
readline.set_completer(complete)
print(input())
""")
output = run_pty(script, input=b"$\t\n")
self.assertIn(b"$complete", output)
def test_nonascii(self):
loc = locale.setlocale(locale.LC_CTYPE, None)
if loc in ('C', 'POSIX'):
# bpo-29240: On FreeBSD, if the LC_CTYPE locale is C or POSIX,
# writing and reading non-ASCII bytes into/from a TTY works, but
# readline or ncurses ignores non-ASCII bytes on read.
self.skipTest(f"the LC_CTYPE locale is {loc!r}")
if sys.flags.utf8_mode:
encoding = locale.getencoding()
encoding = codecs.lookup(encoding).name # normalize the name
if encoding != "utf-8":
# gh-133711: The Python UTF-8 Mode ignores the LC_CTYPE locale
# and always use the UTF-8 encoding.
self.skipTest(f"the LC_CTYPE encoding is {encoding!r}")
try:
readline.add_history("\xEB\xEF")
except UnicodeEncodeError as err:
self.skipTest("Locale cannot encode test data: " + format(err))
script = r"""import readline
is_editline = readline.backend == "editline"
inserted = "[\xEFnserted]"
macro = "|t\xEB[after]"
set_pre_input_hook = getattr(readline, "set_pre_input_hook", None)
if is_editline or not set_pre_input_hook:
# The insert_line() call via pre_input_hook() does nothing with Editline,
# so include the extra text that would have been inserted here
macro = inserted + macro
if is_editline:
readline.parse_and_bind(r'bind ^B ed-prev-char')
readline.parse_and_bind(r'bind "\t" rl_complete')
readline.parse_and_bind(r'bind -s ^A "{}"'.format(macro))
else:
readline.parse_and_bind(r'Control-b: backward-char')
readline.parse_and_bind(r'"\t": complete')
readline.parse_and_bind(r'set disable-completion off')
readline.parse_and_bind(r'set show-all-if-ambiguous off')
readline.parse_and_bind(r'set show-all-if-unmodified off')
readline.parse_and_bind(r'Control-a: "{}"'.format(macro))
def pre_input_hook():
readline.insert_text(inserted)
readline.redisplay()
if set_pre_input_hook:
set_pre_input_hook(pre_input_hook)
def completer(text, state):
if text == "t\xEB":
if state == 0:
print("text", ascii(text))
print("line", ascii(readline.get_line_buffer()))
print("indexes", readline.get_begidx(), readline.get_endidx())
return "t\xEBnt"
if state == 1:
return "t\xEBxt"
if text == "t\xEBx" and state == 0:
return "t\xEBxt"
return None
readline.set_completer(completer)
def display(substitution, matches, longest_match_length):
print("substitution", ascii(substitution))
print("matches", ascii(matches))
readline.set_completion_display_matches_hook(display)
print("result", ascii(input()))
print("history", ascii(readline.get_history_item(1)))
"""
input = b"\x01" # Ctrl-A, expands to "|t\xEB[after]"
input += b"\x02" * len("[after]") # Move cursor back
input += b"\t\t" # Display possible completions
input += b"x\t" # Complete "t\xEBx" -> "t\xEBxt"
input += b"\r"
output = run_pty(script, input)
self.assertIn(b"text 't\\xeb'\r\n", output)
self.assertIn(b"line '[\\xefnserted]|t\\xeb[after]'\r\n", output)
if sys.platform == "darwin" or not is_editline:
self.assertIn(b"indexes 11 13\r\n", output)
# Non-macOS libedit does not handle non-ASCII bytes
# the same way and generates character indices
# rather than byte indices via get_begidx() and
# get_endidx(). Ex: libedit2 3.1-20191231-2 on Debian
# winds up with "indexes 10 12". Stemming from the
# start and end values calls back into readline.c's
# rl_attempted_completion_function = flex_complete with:
# (11, 13) instead of libreadline's (12, 15).
if not is_editline and hasattr(readline, "set_pre_input_hook"):
self.assertIn(b"substitution 't\\xeb'\r\n", output)
self.assertIn(b"matches ['t\\xebnt', 't\\xebxt']\r\n", output)
expected = br"'[\xefnserted]|t\xebxt[after]'"
self.assertIn(b"result " + expected + b"\r\n", output)
# bpo-45195: Sometimes, the newline character is not written at the
# end, so don't expect it in the output.
self.assertIn(b"history " + expected, output)
# We have 2 reasons to skip this test:
# - readline: history size was added in 6.0
# See https://cnswww.cns.cwru.edu/php/chet/readline/CHANGES
# - editline: history size is broken on OS X 10.11.6.
# Newer versions were not tested yet.
@unittest.skipIf(readline._READLINE_VERSION < 0x600,
"this readline version does not support history-size")
@unittest.skipIf(is_editline,
"editline history size configuration is broken")
def test_history_size(self):
history_size = 10
with temp_dir() as test_dir:
inputrc = os.path.join(test_dir, "inputrc")
with open(inputrc, "wb") as f:
f.write(b"set history-size %d\n" % history_size)
history_file = os.path.join(test_dir, "history")
with open(history_file, "wb") as f:
# history_size * 2 items crashes readline
data = b"".join(b"item %d\n" % i
for i in range(history_size * 2))
f.write(data)
script = """
import os
import readline
history_file = os.environ["HISTORY_FILE"]
readline.read_history_file(history_file)
input()
readline.write_history_file(history_file)
"""
env = dict(os.environ)
env["INPUTRC"] = inputrc
env["HISTORY_FILE"] = history_file
run_pty(script, input=b"last input\r", env=env)
with open(history_file, "rb") as f:
lines = f.readlines()
self.assertEqual(len(lines), history_size)
self.assertEqual(lines[-1].strip(), b"last input")
@requires_working_threading()
def test_gh123321_threadsafe(self):
"""gh-123321: readline should be thread-safe and not crash"""
script = textwrap.dedent(r"""
import threading
from test.support.threading_helper import join_thread
def func():
input()
thread1 = threading.Thread(target=func)
thread2 = threading.Thread(target=func)
thread1.start()
thread2.start()
join_thread(thread1)
join_thread(thread2)
print("done")
""")
output = run_pty(script, input=b"input1\rinput2\r")
self.assertIn(b"done", output)
def test_write_read_limited_history(self):
previous_length = readline.get_history_length()
self.addCleanup(readline.set_history_length, previous_length)
readline.add_history("first line")
readline.add_history("second line")
readline.add_history("third line")
readline.set_history_length(2)
self.assertEqual(readline.get_history_length(), 2)
readline.write_history_file(TESTFN)
self.addCleanup(os.remove, TESTFN)
readline.read_history_file(TESTFN)
# Without clear_history() there's no good way to test if
# the correct entries are present (we're combining history limiting and
# possible deduplication with arbitrary previous content).
# So, we've only tested that the read did not fail.
# See TestHistoryManipulation for the full test.
@unittest.skipUnless(hasattr(readline, "get_pre_input_hook"),
"get_pre_input_hook not available")
def test_get_pre_input_hook(self):
# Save and restore the original hook to avoid side effects
original_hook = readline.get_pre_input_hook()
self.addCleanup(readline.set_pre_input_hook, original_hook)
# Test that get_pre_input_hook returns None when no hook is set
readline.set_pre_input_hook(None)
self.assertIsNone(readline.get_pre_input_hook())
# Set a hook and verify we can retrieve it
def my_hook():
pass
readline.set_pre_input_hook(my_hook)
self.assertIs(readline.get_pre_input_hook(), my_hook)
@unittest.skipUnless(support.Py_GIL_DISABLED, 'these tests can only possibly fail with GIL disabled')
class FreeThreadingTest(unittest.TestCase):
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_free_threading(self):
def completer_delims(b):
b.wait()
for _ in range(100):
readline.get_completer_delims()
readline.set_completer_delims(' \t\n`@#%^&*()=+[{]}\\|;:\'",<>?')
readline.set_completer_delims(' \t\n`@#%^&*()=+[{]}\\|;:\'",<>?')
readline.get_completer_delims()
count = 40
barrier = threading.Barrier(count)
threads = [threading.Thread(target=completer_delims, args=(barrier,)) for _ in range(count)]
with threading_helper.start_threads(threads):
pass
if __name__ == "__main__":
unittest.main()
|
python
|
github
|
https://github.com/python/cpython
|
Lib/test/test_readline.py
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Templates for Chat Models[[templates-for-chat-models]]
## Introduction[[introduction]]
One of the most common use cases for LLMs today is **chat**. Rather than continuing a single string of text the way a plain language model does, a chat model continues a conversation made up of one or more **messages**, each of which has a **role** such as "user" or "assistant", as well as the message text.
Just like tokenization, different models expect very different input formats for chat. This is why we added **chat templates** as a feature. Chat templates are part of the tokenizer: they specify how to convert a list of conversation messages into a single tokenizable string in the format the model expects.
Let's make this concrete with a quick example using the `BlenderBot` model. BlenderBot has an extremely simple default template, which mostly just adds whitespace between rounds of dialogue:
```python
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
>>> chat = [
... {"role": "user", "content": "Hello, how are you?"},
... {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
... {"role": "user", "content": "I'd like to show off how chat templating works!"},
... ]
>>> tokenizer.apply_chat_template(chat, tokenize=False)
" Hello, how are you? I'm doing great. How can I help you today? I'd like to show off how chat templating works!</s>"
```
Notice how the entire chat has been condensed into a single string. If we use the default setting, `tokenize=True`, that string will also be tokenized for us. To see a more complex template in action, let's use the `mistralai/Mistral-7B-Instruct-v0.1` model.
```python
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
>>> chat = [
... {"role": "user", "content": "Hello, how are you?"},
... {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
... {"role": "user", "content": "I'd like to show off how chat templating works!"},
... ]
>>> tokenizer.apply_chat_template(chat, tokenize=False)
"<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]"
```
This time the tokenizer has added the control tokens [INST] and [/INST] to mark the start and end of user messages (but not assistant messages). Mistral-instruct was trained with these tokens, whereas BlenderBot was not.
## How do I use chat templates?[[how-do-i-use-chat-templates]]
As the example above shows, chat templates are easy to use. Simply build a list of messages with `role` and `content` keys, and then pass it to the [`~PreTrainedTokenizer.apply_chat_template`] method. This gives you output that's ready to go! When using chat templates as input for model generation, it's also a good idea to use `add_generation_prompt=True` to add a [generation prompt](#what-are-generation-prompts).
Here's an example of preparing input for `model.generate()` using the `Zephyr` assistant model:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
checkpoint = "HuggingFaceH4/zephyr-7b-beta"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)  # You may want to use bfloat16 and/or move to GPU here
messages = [
{
"role": "system",
"content": "You are a friendly chatbot who always responds in the style of a pirate",
},
{"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
print(tokenizer.decode(tokenized_chat[0]))
```
This produces a string in the input format that Zephyr expects.
```text
<|system|>
You are a friendly chatbot who always responds in the style of a pirate</s>
<|user|>
How many helicopters can a human eat in one sitting?</s>
<|assistant|>
```
Now that the input is formatted for Zephyr, we can use the model to generate a response to the user's question:
```python
outputs = model.generate(tokenized_chat, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```
This yields:
```text
<|system|>
You are a friendly chatbot who always responds in the style of a pirate</s>
<|user|>
How many helicopters can a human eat in one sitting?</s>
<|assistant|>
Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all.
```
Easy, wasn't it?
## Is there an automated pipeline for chat?[[is-there-an-automated-pipeline-for-chat]]
Yes, there is! Our text generation pipelines support chat inputs, which makes chat models easy to use. In the past there was a dedicated "ConversationalPipeline" class, but its functionality has now been merged into [`TextGenerationPipeline`]. Let's try the `Zephyr` example again, this time using a pipeline:
```python
from transformers import pipeline
pipe = pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta")
messages = [
{
"role": "system",
"content": "You are a friendly chatbot who always responds in the style of a pirate",
},
{"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
print(pipe(messages, max_new_tokens=128)[0]['generated_text'][-1])  # Print the assistant's response
```
```text
{'role': 'assistant', 'content': "Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all."}
```
The pipeline takes care of all the details of tokenization and of calling `apply_chat_template` for you; once the model has a chat template, all you need to do is initialize the pipeline and pass it the list of messages!
## What are "generation prompts"?[[what-are-generation-prompts]]
You may have noticed that the `apply_chat_template` method has an `add_generation_prompt` argument. This argument tells the template to add tokens that indicate the start of a bot response. For example, consider the following chat:
```python
messages = [
{"role": "user", "content": "Hi there!"},
{"role": "assistant", "content": "Nice to meet you!"},
{"role": "user", "content": "Can I ask a question?"}
]
```
Here's what this looks like without a generation prompt, using the ChatML template we saw in the Zephyr example:
```python
tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
"""<|im_start|>user
Hi there!<|im_end|>
<|im_start|>assistant
Nice to meet you!<|im_end|>
<|im_start|>user
Can I ask a question?<|im_end|>
"""
```
And here's what it looks like **with** a generation prompt:
```python
tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
"""<|im_start|>user
Hi there!<|im_end|>
<|im_start|>assistant
Nice to meet you!<|im_end|>
<|im_start|>user
Can I ask a question?<|im_end|>
<|im_start|>assistant
"""
```
Note that this time we've added the tokens that indicate the start of a bot response. This ensures that when the model generates text, it writes a bot response instead of, say, continuing the user's message. Remember, chat models are still just language models, and chat is just a special kind of text to them! You need to guide them with the appropriate control tokens so they know what they're supposed to be doing.
Not all models require generation prompts. Some models, like BlenderBot and LLaMA, don't have any special tokens before bot responses. In these cases the `add_generation_prompt` argument has no effect. The exact effect of `add_generation_prompt` depends on the template being used.
## Can I use chat templates in training?[[can-i-use-chat-templates-in-training]]
Yes! This is a good way to make sure the chat template matches the tokens the model sees during training. We recommend applying the chat template as a preprocessing step for your dataset. After that, you can continue just like any other language model training task. When training, you should usually set `add_generation_prompt=False`, because the extra tokens that prompt an assistant response are not helpful during training. Let's look at an example:
```python
from transformers import AutoTokenizer
from datasets import Dataset
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
chat1 = [
{"role": "user", "content": "Which is bigger, the moon or the sun?"},
{"role": "assistant", "content": "The sun."}
]
chat2 = [
{"role": "user", "content": "Which is bigger, a virus or a bacterium?"},
{"role": "assistant", "content": "A bacterium."}
]
dataset = Dataset.from_dict({"chat": [chat1, chat2]})
dataset = dataset.map(lambda x: {"formatted_chat": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)})
print(dataset['formatted_chat'][0])
```
Which gives us:
```text
<|user|>
Which is bigger, the moon or the sun?</s>
<|assistant|>
The sun.</s>
```
From here, just continue training as you would for a standard language modelling task, using the `formatted_chat` column.
<Tip>
If you format text with `apply_chat_template(tokenize=False)` and then tokenize it in a separate step, you should set the argument `add_special_tokens=False`. If you use `apply_chat_template(tokenize=True)`, you don't need to worry about this! (A short sketch follows this tip.)
By default, some tokenizers add special tokens like `<bos>` and `<eos>` when they tokenize. Chat templates should already include all the special tokens they need, so adding extra special tokens with the default `add_special_tokens=True` can result in incorrect or duplicated special tokens, which will hurt model performance.
</Tip>
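For clarity, here is a minimal sketch of that two-step pattern, reusing the `tokenizer` and `chat1` variables from the training example above (the variable names are simply taken from that example):
```python
# Step 1: render the chat to a plain string; the template inserts all the special tokens it needs
formatted_chat = tokenizer.apply_chat_template(chat1, tokenize=False, add_generation_prompt=False)
# Step 2: tokenize separately, suppressing the tokenizer's own BOS/EOS insertion
tokenized_chat = tokenizer(formatted_chat, add_special_tokens=False)
```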
## Advanced: Extra inputs to chat templates[[advanced-extra-inputs-to-chat-templates]]
The only argument that `apply_chat_template` requires is `messages`. However, any keyword argument you pass to `apply_chat_template` becomes available inside the template. This gives you a lot of freedom to use chat templates for many things. There are no restrictions on the names or formats of these arguments - you can pass strings, lists, dicts, and so on.
That said, there are some common use cases for these extra arguments, such as passing tools for function calling, or documents for retrieval-augmented generation. For these common cases we have some recommendations about the names and formats of the arguments, described in the sections below. We encourage model authors to make their chat templates compatible with this format, to make it easy to move tool-calling code between models.
## Advanced: Tool use / function calling[[advanced-tool-use--function-calling]]
"Tool use" LLMs can call functions as external tools before generating an answer. When passing tools to a tool-use model, you can simply pass a list of functions as the `tools` argument:
```python
from datetime import datetime
def current_time():
    """Get the current local time as a string."""
    return str(datetime.now())
def multiply(a: float, b: float):
    """
    A function that multiplies two numbers
    Args:
        a: The first number to multiply
        b: The second number to multiply
    """
    return a * b
tools = [current_time, multiply]
model_input = tokenizer.apply_chat_template(
messages,
tools=tools
)
```
For this to work correctly, you should write your functions in the format above so they can be parsed correctly as tools. Specifically, follow these rules:
- The function should have a descriptive name.
- Every argument must have a type hint.
- The function must have a docstring in the standard Google style (an initial function description followed by an `Args:` block describing the arguments).
- Do not include types in the `Args:` block. In other words, write `a: The first number to multiply`, not `a (int): The first number to multiply`. Type hints belong in the function header.
- The function can have a return type and a `Returns:` block in the docstring, but these are optional, because most tool-use models ignore them.
### Passing tool results to the model[[passing-tool-results-to-the-model]]
The sample code above is enough to list the available tools for your model, but what if the model actually wants to use one? In that case you should:
1. Parse the model's output to get the tool name(s) and arguments.
2. Add the model's tool call(s) to the conversation.
3. Call the corresponding function(s) with those arguments.
4. Add the result(s) to the conversation.
### A complete tool use example[[a-complete-tool-use-example]]
Let's walk through a tool use example step by step. For this example we'll use the 8B `Hermes-2-Pro` model, one of the best-performing tool-use models in its size class. If you have the memory, you can also consider a larger model such as [Command-R](https://huggingface.co/CohereForAI/c4ai-command-r-v01) or [Mixtral-8x22B](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1), both of which support tool use and offer even stronger performance.
First, let's load our model and tokenizer:
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
checkpoint = "NousResearch/Hermes-2-Pro-Llama-3-8B"
tokenizer = AutoTokenizer.from_pretrained(checkpoint, revision="pr/13")
model = AutoModelForCausalLM.from_pretrained(checkpoint, dtype=torch.bfloat16, device_map="auto")
```
Next, let's define a list of tools:
```python
def get_current_temperature(location: str, unit: str) -> float:
    """
    Get the current temperature at a location.
    Args:
        location: The location to get the temperature for, in the format "City, Country"
        unit: The unit to return the temperature in. (choices: ["celsius", "fahrenheit"])
    Returns:
        The current temperature at the specified location in the specified units, as a float.
    """
    return 22.  # A real function should probably actually get the temperature!
def get_current_wind_speed(location: str) -> float:
    """
    Get the current wind speed in km/h at a given location.
    Args:
        location: The location to get the wind speed for, in the format "City, Country"
    Returns:
        The current wind speed at the given location in km/h, as a float.
    """
    return 6.  # A real function should probably actually get the wind speed!
tools = [get_current_temperature, get_current_wind_speed]
```
Now, let's set up a conversation for our bot:
```python
messages = [
{"role": "system", "content": "You are a bot that responds to weather queries. You should reply with the unit used in the queried location."},
{"role": "user", "content": "Hey, what's the temperature in Paris right now?"}
]
```
Now let's apply the chat template and generate a response:
```python
inputs = tokenizer.apply_chat_template(messages, chat_template="tool_use", tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
inputs = {k: v.to(model.device) for k, v in inputs.items()}
out = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):]))
```
The output is:
```text
<tool_call>
{"arguments": {"location": "Paris, France", "unit": "celsius"}, "name": "get_current_temperature"}
</tool_call><|im_end|>
```
The model has called the function with valid arguments, in the format requested by the function docstring. It has inferred that we most likely mean Paris, France, and remembered that, as the home of SI units, the temperature in France should be reported in Celsius.
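The parsing in step 1 above is left to you; as a rough sketch (not an official API - the regex and helper name are assumptions for illustration), the Hermes-style `<tool_call>` block shown above could be extracted like this:
```python
import json
import re
def parse_hermes_tool_calls(decoded_output: str):
    """Extract tool-call dicts from <tool_call>...</tool_call> blocks in the decoded model output."""
    calls = []
    for payload in re.findall(r"<tool_call>\s*(.*?)\s*</tool_call>", decoded_output, re.DOTALL):
        calls.append(json.loads(payload))  # e.g. {"arguments": {...}, "name": "get_current_temperature"}
    return calls
# Calling it on the decoded generation above would return the get_current_temperature call shown there.
```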
Let's append the model's tool call to the conversation. We generate an arbitrary `tool_call_id` here. These IDs are not used by every model, but they let models issue several tool calls at once and keep track of which response corresponds to which call. The IDs should be unique within a conversation.
```python
tool_call_id = "vAHdf3"  # Random ID, should be unique for each tool call
tool_call = {"name": "get_current_temperature", "arguments": {"location": "Paris, France", "unit": "celsius"}}
messages.append({"role": "assistant", "tool_calls": [{"id": tool_call_id, "type": "function", "function": tool_call}]})
```
Now that we've added the tool call to the conversation, we can call the function and append the result. Since the example uses a dummy function that always returns 22.0, we can just append that result directly. Once again, the `tool_call_id` must match the ID we used for the tool call.
```python
messages.append({"role": "tool", "tool_call_id": tool_call_id, "name": "get_current_temperature", "content": "22.0"})
```
Finally, let's let the assistant read the function output and continue chatting with the user:
```python
inputs = tokenizer.apply_chat_template(messages, chat_template="tool_use", tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
inputs = {k: v.to(model.device) for k, v in inputs.items()}
out = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):]))
```
The output is:
```text
The current temperature in Paris, France is 22.0 ° Celsius.<|im_end|>
```
Although this was a simple demo with a dummy tool and a single call, the same technique works with multiple real tools and longer conversations. This lets you extend the capabilities of conversational agents with real-time information, computational tools, or access to large databases.
<Tip>
Not all of the tool-calling features shown above are used by every model. Some use tool call IDs, others simply use the function name and match tool calls to results by their order, and several models use neither and only issue one tool call at a time to avoid confusion. If you want your code to be compatible with as many models as possible, we recommend structuring your tool calls as shown here and returning tool results in the order the model issued the calls. The chat template of each model should handle the rest.
</Tip>
### Understanding tool schemas[[understanding-tool-schemas]]
Each function you pass to the `tools` argument of `apply_chat_template` is converted into a [JSON schema](https://json-schema.org/learn/getting-started-step-by-step). These schemas are then passed to the model's chat template. In other words, tool-use models never see your functions directly, and they never see the actual code inside them. What they care about is the function **definitions** and the **arguments** - what the tools do and how to use them, not how they work! It is up to you to read the model's output, detect whether it has requested a tool, pass the arguments to the tool function, and return the response in the chat.
Generating the JSON schemas to pass to the template should be automatic and invisible as long as your functions follow the specification above. But if you run into problems, or simply want more control over the conversion, you can handle it manually. Here is an example of a manual schema conversion.
```python
from transformers.utils import get_json_schema
def multiply(a: float, b: float):
    """
    A function that multiplies two numbers
    Args:
        a: The first number to multiply
        b: The second number to multiply
    """
    return a * b
schema = get_json_schema(multiply)
print(schema)
```
This yields:
```json
{
"type": "function",
"function": {
"name": "multiply",
"description": "A function that multiplies two numbers",
"parameters": {
"type": "object",
"properties": {
"a": {
"type": "number",
"description": "The first number to multiply"
},
"b": {
"type": "number",
"description": "The second number to multiply"
}
},
"required": ["a", "b"]
}
}
}
```
If you wish, you can edit these schemas, or even write them from scratch yourself without using `get_json_schema` at all. JSON schemas can be passed directly to the `tools` argument of `apply_chat_template`, which gives you a lot of power to define precise schemas for more complex functions. Be careful, though - the more complex your schemas, the more likely the model is to get confused when handling them! We recommend keeping function signatures as simple as possible and keeping arguments (especially complex, nested arguments) to a minimum.
Here is an example of defining schemas by hand and passing them directly to `apply_chat_template`:
```python
# A simple function that takes no arguments
current_time = {
"type": "function",
"function": {
"name": "current_time",
"description": "Get the current local time as a string.",
"parameters": {
'type': 'object',
'properties': {}
}
}
}
# A more complete function that takes two numerical arguments
multiply = {
'type': 'function',
'function': {
'name': 'multiply',
'description': 'A function that multiplies two numbers',
'parameters': {
'type': 'object',
'properties': {
'a': {
'type': 'number',
'description': 'The first number to multiply'
},
'b': {
'type': 'number', 'description': 'The second number to multiply'
}
},
'required': ['a', 'b']
}
}
}
model_input = tokenizer.apply_chat_template(
messages,
tools = [current_time, multiply]
)
```
## Advanced: Retrieval-augmented generation[[advanced-retrieval-augmented-generation]]
"Retrieval-augmented generation" or "RAG" LLMs can search a corpus of documents for information before responding to a query. This lets the model greatly expand its knowledge base beyond its limited context size. Our recommendation for RAG models is that their template should accept a `documents` argument: a list of documents, where each "document" is a single dict with `title` and `contents` keys. Because this format is much simpler than the JSON schemas used for tools, no helper functions are needed.
Here's an example of a RAG template in action:
```python
document1 = {
"title": "The Moon: Our Age-Old Foe",
"contents": "Man has always dreamed of destroying the moon. In this essay, I shall..."
}
document2 = {
"title": "The Sun: Our Age-Old Friend",
"contents": "Although often underappreciated, the sun provides several notable benefits..."
}
model_input = tokenizer.apply_chat_template(
messages,
documents=[document1, document2]
)
```
## Advanced: How do chat templates work?[[advanced-how-do-chat-templates-work]]
A model's chat template is stored in the `tokenizer.chat_template` attribute. If no chat template is set, the default template for that model class is used instead. Let's look at the template for `BlenderBot`:
```python
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
>>> tokenizer.chat_template
"{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}"
```
That's a bit hard to read. Let's clean it up to make it more readable. In the process, we also need to make sure the newlines and indentation we add don't end up in the template output - see the tips on [trimming whitespace](#trimming-whitespace) below:
```
{%- for message in messages %}
{%- if message['role'] == 'user' %}
{{- ' ' }}
{%- endif %}
{{- message['content'] }}
{%- if not loop.last %}
{{- ' ' }}
{%- endif %}
{%- endfor %}
{{- eos_token }}
```
If you've never seen one of these before, this is a [Jinja template](https://jinja.palletsprojects.com/en/3.1.x/templates/).
Jinja is a templating language that lets you write simple code that generates text. In many ways its code and syntax resemble Python. In pure Python, this template would look something like this:
```python
for idx, message in enumerate(messages):
    if message['role'] == 'user':
        print(' ')
    print(message['content'])
    if not idx == len(messages) - 1:  # Check for the last message in the conversation
        print(' ')
print(eos_token)
```
This template does three things:
1. For each message, if the message is a user message, add a space before it; otherwise print nothing.
2. Add the message content.
3. If the message is not the last one, add two spaces after it. After the final message, print the EOS token.
This is a very simple template - it doesn't add any control tokens, and it doesn't support "system" messages, which instruct the model on how it should behave in the following conversation. But Jinja gives you a lot of flexibility to do those things! Let's look at a Jinja template that formats inputs in a way similar to LLaMA (note that the real LLaMA template includes default system message handling and slightly different system message handling in general - don't use this one in your actual code!):
```
{%- for message in messages %}
{%- if message['role'] == 'user' %}
{{- bos_token + '[INST] ' + message['content'] + ' [/INST]' }}
{%- elif message['role'] == 'system' %}
{{- '<<SYS>>\\n' + message['content'] + '\\n<</SYS>>\\n\\n' }}
{%- elif message['role'] == 'assistant' %}
{{- ' ' + message['content'] + ' ' + eos_token }}
{%- endif %}
{%- endfor %}
```
If you stare at this for a while, you can see what it's doing - it adds specific tokens based on the "role" of each message, which tells the model who sent it. User, assistant and system messages are clearly distinguishable to the model because they're wrapped in distinct tokens.
## Advanced: Adding and editing chat templates[[advanced-adding-and-editing-chat-templates]]
### How do I create a chat template?[[how-do-i-create-a-chat-template]]
Simple: just write a Jinja template and set `tokenizer.chat_template`. You may find it easier to start from an existing template of another model and edit it to your needs! For example, we could take the LLaMA template above and add "[ASST]" and "[/ASST]" to assistant messages:
```
{%- for message in messages %}
{%- if message['role'] == 'user' %}
{{- bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }}
{%- elif message['role'] == 'system' %}
{{- '<<SYS>>\\n' + message['content'].strip() + '\\n<</SYS>>\\n\\n' }}
{%- elif message['role'] == 'assistant' %}
{{- '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }}
{%- endif %}
{%- endfor %}
```
Now just set the `tokenizer.chat_template` attribute. The next time you use [`~PreTrainedTokenizer.apply_chat_template`], your new template will be used! The attribute is saved in the `tokenizer_config.json` file, so you can use [`~utils.PushToHubMixin.push_to_hub`] to upload the new template to the Hub and make sure everyone uses the right template for your model!
```python
template = tokenizer.chat_template
template = template.replace("SYS", "SYSTEM")  # Change the system token
tokenizer.chat_template = template  # Set the new template
tokenizer.push_to_hub("model_name")  # Upload your new template to the Hub!
```
The [`~PreTrainedTokenizer.apply_chat_template`] method that uses your chat template is called by the [`TextGenerationPipeline`] class, so once you set the correct chat template, your model automatically becomes compatible with [`TextGenerationPipeline`].
<Tip>
If you're fine-tuning a model for chat, in addition to setting a chat template, you should probably add the new chat control tokens to the tokenizer as special tokens. Special tokens are never split, which prevents your control tokens from being tokenized into several pieces. You should also set the tokenizer's `eos_token` attribute to the token that marks the end of assistant generations in your template. This ensures that text generation tools know exactly when to stop generating text. (A short sketch follows this tip.)
</Tip>
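As a minimal sketch of that setup for a ChatML-style template (the checkpoint name is a placeholder, and whether the tokens actually need to be added depends on your tokenizer):
```python
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("path/to/your-base-model")  # placeholder checkpoint
# Register the chat control tokens as special tokens so they are never split
tokenizer.add_special_tokens({"additional_special_tokens": ["<|im_start|>", "<|im_end|>"]})
# The token that ends an assistant turn in the template, so generation knows when to stop
tokenizer.eos_token = "<|im_end|>"
# If tokens were actually added, remember to resize the model embeddings accordingly:
# model.resize_token_embeddings(len(tokenizer))
```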
### Why do some models have multiple templates?[[why-do-some-models-have-multiple-templates]]
Some models use different templates for different use cases. For example, they might use one template for normal chat and a separate one for tool use or retrieval-augmented generation. In these cases, `tokenizer.chat_template` is a dictionary. This can cause some confusion, and where possible we recommend using a single template for all use cases. Jinja statements like `if tools is defined` and `{% macro %}` definitions make it easy to wrap multiple code paths in a single template, as in the sketch further below.
When a tokenizer has multiple templates, `tokenizer.chat_template` is a `dict` whose keys are template names. The `apply_chat_template` method has special handling for certain template names: it normally looks for a template named `default` and raises an error if it can't find one. However, if a template named `tool_use` exists and the user has passed a `tools` argument, that one is used instead. To access templates with other names, pass the template name you want to the `chat_template` argument of `apply_chat_template()`.
We find this can be a bit confusing for users, though - so if you're writing a template yourself, we recommend putting everything in a single template where possible!
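As a rough illustration of the single-template approach (not an official template - it borrows the ChatML control tokens used elsewhere in this document and assumes the `tojson` filter is available), a template can branch on whether `tools` was passed:
```
{%- if tools is defined and tools %}
    {{- '<|im_start|>system\n' + 'You may call the following tools:\n' }}
    {%- for tool in tools %}
        {{- tool | tojson }}{{- '\n' }}
    {%- endfor %}
    {{- '<|im_end|>\n' }}
{%- endif %}
{%- for message in messages %}
    {{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
```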
### What template should I use?[[what-template-should-i-use]]
When setting the template for a model that has already been trained for chat, make sure the template exactly matches the message format the model saw during training; otherwise you are likely to see performance degradation. The same applies if you are training the model further - keeping the chat tokens constant gives you the best performance. This is very analogous to tokenization: you get the best performance at inference or fine-tuning when you exactly match the tokenization used during training.
If you're training a model from scratch, or fine-tuning a base language model for chat, on the other hand, you have a lot of freedom to choose an appropriate template! LLMs are smart enough to handle many different input formats. One popular choice is the `ChatML` format, which is a good, flexible choice for many use cases. It looks like this:
```
{%- for message in messages %}
{{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}
{%- endfor %}
```
If you like this template, here it is in one-liner form, ready to copy straight into your code. The one-liner also includes handy support for [generation prompts](#what-are-generation-prompts), but note that it does not add BOS or EOS tokens! Even if your model expects those tokens, they will not be added automatically by `apply_chat_template` - in other words, the text is tokenized with `add_special_tokens=False`. This is to avoid potential conflicts between the template and the `add_special_tokens` logic. If your model expects special tokens, make sure to add them directly to the template!
```python
tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
```
This template wraps each message in `<|im_start|>` and `<|im_end|>` tokens and writes the role as a plain string, which gives you flexibility in the roles you train with. The output looks like this:
```text
<|im_start|>system
You are a helpful chatbot that will do its best not to say anything so stupid that people tweet about it.<|im_end|>
<|im_start|>user
How are you?<|im_end|>
<|im_start|>assistant
I'm doing great!<|im_end|>
```
The "user", "system" and "assistant" roles are the standard for chat, and we recommend using them when it makes sense, particularly if you want your model to work well with [`TextGenerationPipeline`]. However, you are not limited to these roles - templating is extremely flexible, and any string can be a role.
### I want to add some chat templates! How should I get started?[[i-want-to-add-some-chat-templates-how-should-i-get-started]]
If you have a chat model, you should set its `tokenizer.chat_template` attribute, test it with [`~PreTrainedTokenizer.apply_chat_template`], and then push the updated tokenizer to the Hub. This applies even if you're not the model owner - if you're using a model with an empty chat template, or one that's still using the default class template, please open a [pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) on the model repository so that this attribute can be set properly!
Once the attribute is set, that's it, you're done! `tokenizer.apply_chat_template` will now work correctly for that model, which means it is also automatically supported in places like `TextGenerationPipeline`!
By making sure models have this attribute, we can make the full power of open-source models available to the whole community. Formatting mismatches have been dragging down this field and silently hurting performance for too long - it's time to put an end to them!
## Advanced: Template writing tips[[advanced-template-writing-tips]]
If you're not familiar with Jinja, the easiest way to write a chat template is usually to first write a short Python script that formats the messages the way you want, and then convert that script into a template.
The template handler receives the conversation history as a variable called `messages`. You can access `messages` inside the template just like you would in Python, iterating over it with `{% for message in messages %}` or accessing individual messages with, for example, `{{ messages[0] }}`.
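As a tiny illustration of that workflow, here is a made-up Python formatter and the Jinja template it would turn into. Neither comes from a real model; the `<|role|>` markers are arbitrary:
```python
def format_chat(messages):
    """Plain-Python version of the formatting we want."""
    text = ""
    for message in messages:
        text += "<|" + message["role"] + "|>\n" + message["content"] + "\n"
    return text
# The same logic converted, line by line, into a Jinja chat template.
chat_template = (
    "{%- for message in messages %}"
    "{{- '<|' + message['role'] + '|>\\n' + message['content'] + '\\n' }}"
    "{%- endfor %}"
)
print(format_chat([{"role": "user", "content": "Hi!"}]))
```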
You can also use the following tips to convert your code to Jinja:
### Trimming whitespace[[trimming-whitespace]]
By default, Jinja will print any whitespace that comes before or after a block. This can be a problem for chat templates, which usually want to handle whitespace very precisely! To avoid this, we recommend writing your templates like this:
```
{%- for message in messages %}
{{- message['role'] + message['content'] }}
{%- endfor %}
```
and not like this:
```
{% for message in messages %}
{{ message['role'] + message['content'] }}
{% endfor %}
```
Adding `-` strips the whitespace before or after the block. The second example looks harmless, but newlines and indentation can end up included in the output, which is probably not what you want!
### For loops[[for-loops]]
For loops in Jinja look like this:
```
{%- for message in messages %}
{{- message['content'] }}
{%- endfor %}
```
Note that whatever is inside the {{ expression block }} gets printed to the output. You can use operators like `+` to combine strings inside expression blocks.
### If statements[[if-statements]]
If statements in Jinja look like this:
```
{%- if message['role'] == 'user' %}
{{- message['content'] }}
{%- endif %}
```
While Python uses whitespace to mark the start and end of `for` and `if` blocks, Jinja requires you to end them explicitly with `{% endfor %}` and `{% endif %}`.
### Special variables[[special-variables]]
Inside the template, you have access not only to the `messages` list but also to several other special variables. These include special tokens such as `bos_token` and `eos_token`, as well as the `add_generation_prompt` variable discussed earlier. You can also use the `loop` variable to get information about the current iteration - for example, `{% if loop.last %}` checks whether the current message is the last one in the conversation. Here is an example that adds a generation prompt at the end of the conversation when `add_generation_prompt` is `True`:
```
{%- if loop.last and add_generation_prompt %}
{{- bos_token + 'Assistant:\n' }}
{%- endif %}
```
### Compatibility with non-Python Jinja[[compatibility-with-non-python-jinja]]
There are multiple implementations of Jinja in various languages. They generally share the same syntax, but the key difference is that when you write a template in Python you can use Python methods, such as `.lower()` on strings or `.items()` on dicts. This breaks when someone tries to use the template in a non-Python Jinja implementation, which is common in deployment environments where JS and Rust are popular.
Don't worry, though! There are a few easy changes you can make to your templates to keep them compatible with every Jinja implementation:
- Replace Python methods with Jinja filters. They usually have the same name; for example, `string.lower()` becomes `string|lower` and `dict.items()` becomes `dict|items`. One notable change is that `string.strip()` becomes `string|trim`. See the [list of built-in filters](https://jinja.palletsprojects.com/en/3.1.x/templates/#builtin-filters) in the Jinja documentation for more details. A short sketch follows this list.
- Replace the Python-specific `True`, `False`, and `None` with `true`, `false`, and `none`.
- Rendering a dict or a list directly can give different results in other implementations (for example, string items might switch from single quotes to double quotes). Adding the `tojson` filter helps keep this consistent.
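For example, the two fragments below should render identically under Python Jinja, but only the second is safe for non-Python implementations. Both are illustrative fragments rather than complete templates:
```python
# Relies on Python methods and Python-style literals - only the Python implementation accepts this.
python_only = (
    "{%- if add_generation_prompt == True %}"
    "{{- messages[0]['content'].strip().lower() }}"
    "{%- endif %}"
)
# Portable version: Jinja filters and lowercase literals work in every implementation.
portable = (
    "{%- if add_generation_prompt == true %}"
    "{{- messages[0]['content'] | trim | lower }}"
    "{%- endif %}"
)
```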
|
unknown
|
github
|
https://github.com/huggingface/transformers
|
docs/source/ko/chat_templating.md
|
from openerp.osv import fields, osv
class website_config_settings(osv.osv_memory):
_name = 'website.config.settings'
_inherit = 'res.config.settings'
_columns = {
'website_id': fields.many2one('website', string="website", required=True),
'website_name': fields.related('website_id', 'name', type="char", string="Website Name"),
'language_ids': fields.related('website_id', 'language_ids', type='many2many', relation='res.lang', string='Languages'),
'default_lang_id': fields.related('website_id', 'default_lang_id', type='many2one', relation='res.lang', string='Default language'),
'default_lang_code': fields.related('website_id', 'default_lang_code', type="char", string="Default language code"),
'google_analytics_key': fields.related('website_id', 'google_analytics_key', type="char", string='Google Analytics Key'),
'social_twitter': fields.related('website_id', 'social_twitter', type="char", string='Twitter Account'),
'social_facebook': fields.related('website_id', 'social_facebook', type="char", string='Facebook Account'),
'social_github': fields.related('website_id', 'social_github', type="char", string='GitHub Account'),
'social_linkedin': fields.related('website_id', 'social_linkedin', type="char", string='LinkedIn Account'),
'social_youtube': fields.related('website_id', 'social_youtube', type="char", string='Youtube Account'),
'social_googleplus': fields.related('website_id', 'social_googleplus', type="char", string='Google+ Account'),
}
def on_change_website_id(self, cr, uid, ids, website_id, context=None):
website_data = self.pool.get('website').read(cr, uid, [website_id], [], context=context)[0]
values = {'website_name': website_data['name']}
for fname, v in website_data.items():
if fname in self._columns:
values[fname] = v[0] if v and self._columns[fname]._type == 'many2one' else v
return {'value' : values}
# FIXME in trunk for god sake. Change the fields above to fields.char instead of fields.related,
# and create the function set_website who will set the value on the website_id
# create does not forward the values to the related many2one. Write does.
def create(self, cr, uid, vals, context=None):
config_id = super(website_config_settings, self).create(cr, uid, vals, context=context)
self.write(cr, uid, config_id, vals, context=context)
return config_id
_defaults = {
'website_id': lambda self,cr,uid,c: self.pool.get('website').search(cr, uid, [], context=c)[0],
}
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_PROFILING_ATRACE_PROFILER_H_
#define TENSORFLOW_LITE_PROFILING_ATRACE_PROFILER_H_
#include <memory>
#include "tensorflow/lite/core/api/profiler.h"
namespace tflite {
namespace profiling {
// Creates a profiler which reports the traced events to the Android ATrace.
// Nullptr will be returned if the Android system property 'debug.tflite.trace'
// is not set or the property value is not 1.
std::unique_ptr<tflite::Profiler> MaybeCreateATraceProfiler();
} // namespace profiling
} // namespace tflite
#endif // TENSORFLOW_LITE_PROFILING_ATRACE_PROFILER_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/lite/profiling/atrace_profiler.h
|
import { test } from '../../assert';
export default test({
// Test that @html does not execute scripts when instantiated in the client.
// Needs to be in this test suite because JSDOM does not quite get this right.
mode: ['client'],
test({ window, assert }) {
// In here to give effects etc time to execute
assert.htmlEqual(
window.document.body.innerHTML,
`<main>
<div><script></script></div><script>document.body.innerHTML = 'this should not be executed'</script>
<script></script><script>document.body.innerHTML = 'this neither'</script>
</main>`
);
}
});
|
javascript
|
github
|
https://github.com/sveltejs/svelte
|
packages/svelte/tests/runtime-browser/samples/html-tag-script-2/_config.js
|
""" This is a module which exports a set of plotters based on
matplotlib. You will need to have matplotlib installed using something
like:
apt-get install python-matplotlib
"""
import pyflag.DB as DB
import pyflag.Graph as Graph
try:
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import numpy as np
import matplotlib.image as image
import matplotlib.figure as figure
import StringIO
import tempfile
except ImportError:
active = False
class LinePlot(Graph.GenericGraph):
name = 'Line Plot'
def form(self, query, result):
pass
def plot(self, gen, query, result):
fig = figure.Figure()
ax = fig.add_subplot(111)
x=[]
y=[]
for a,b in gen:
x.append(a)
y.append(b)
ax.plot(x,y , '.')
ax.grid()
## Make a temporary file name:
fd = tempfile.TemporaryFile()
canvas=FigureCanvas(fig)
canvas.print_figure(fd)
fd.seek(0)
result.generator.content_type = "image/png"
result.generator.generator = [ fd.read(), ]
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from troveclient.v1 import client
from openstack_dashboard.api import base
from horizon.utils import functions as utils
LOG = logging.getLogger(__name__)
def troveclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
trove_url = base.url_for(request, 'database')
LOG.debug('troveclient connection created using token "%s" and url "%s"' %
(request.user.token.id, trove_url))
c = client.Client(request.user.username,
request.user.token.id,
project_id=request.user.project_id,
auth_url=trove_url,
insecure=insecure,
cacert=cacert,
http_log_debug=settings.DEBUG)
c.client.auth_token = request.user.token.id
c.client.management_url = trove_url
return c
def instance_list(request, marker=None):
page_size = utils.get_page_size(request)
return troveclient(request).instances.list(limit=page_size, marker=marker)
def instance_get(request, instance_id):
return troveclient(request).instances.get(instance_id)
def instance_delete(request, instance_id):
return troveclient(request).instances.delete(instance_id)
def instance_create(request, name, volume, flavor, databases=None,
users=None, restore_point=None, nics=None,
datastore=None, datastore_version=None):
# TODO(dklyle): adding conditional to support trove without volume
# support for now until API supports checking for volume support
if volume > 0:
volume_params = {'size': volume}
else:
volume_params = None
return troveclient(request).instances.create(
name,
flavor,
volume=volume_params,
databases=databases,
users=users,
restorePoint=restore_point,
nics=nics,
datastore=datastore,
datastore_version=datastore_version)
def instance_resize_volume(request, instance_id, size):
return troveclient(request).instances.resize_volume(instance_id, size)
def instance_backups(request, instance_id):
return troveclient(request).instances.backups(instance_id)
def instance_restart(request, instance_id):
return troveclient(request).instances.restart(instance_id)
def database_list(request, instance_id):
return troveclient(request).databases.list(instance_id)
def database_delete(request, instance_id, db_name):
return troveclient(request).databases.delete(instance_id, db_name)
def backup_list(request):
return troveclient(request).backups.list()
def backup_get(request, backup_id):
return troveclient(request).backups.get(backup_id)
def backup_delete(request, backup_id):
return troveclient(request).backups.delete(backup_id)
def backup_create(request, name, instance_id, description=None,
parent_id=None):
return troveclient(request).backups.create(name, instance_id,
description, parent_id)
def flavor_list(request):
return troveclient(request).flavors.list()
def flavor_get(request, flavor_id):
return troveclient(request).flavors.get(flavor_id)
def users_list(request, instance_id):
return troveclient(request).users.list(instance_id)
def user_delete(request, instance_id, user):
return troveclient(request).users.delete(instance_id, user)
def user_list_access(request, instance_id, user):
return troveclient(request).users.list_access(instance_id, user)
def datastore_list(request):
return troveclient(request).datastores.list()
def datastore_version_list(request, datastore):
return troveclient(request).datastore_versions.list(datastore)
|
unknown
|
codeparrot/codeparrot-clean
| ||
<?php
// autoload_static.php @generated by Composer
namespace Composer\Autoload;
class ComposerStaticInit894a6ddbda1e609774926efb0f5a8fc2
{
public static $files = array (
'2962aeb5f454bcdf16ad79a35133cf35' => __DIR__ . '/..' . '/evil/pkg/exec.php',
);
public static $classMap = array (
'Composer\\InstalledVersions' => __DIR__ . '/..' . '/composer/InstalledVersions.php',
);
public static function getInitializer(ClassLoader $loader)
{
return \Closure::bind(function () use ($loader) {
$loader->classMap = ComposerStaticInit894a6ddbda1e609774926efb0f5a8fc2::$classMap;
}, null, ClassLoader::class);
}
}
|
php
|
github
|
https://github.com/composer/composer
|
tests/Composer/Test/Fixtures/functional/plugin-autoloading-only-loads-dependencies/vendor/composer/autoload_static.php
|
import datetime
import re
import calendar
from itertools import tee
from CronJob import CCronJob
from datetime import timedelta
class CCronPredictor:
""" predicts runs of cron"""
def __init__(self,minute,hour,day,month,dayOfWeek,length,command, includeRareJobs=False):
self.minute=minute
self.hour=hour
self.day=day
self.month=month
self.dayOfWeek=dayOfWeek
self.length=length
self.command=command
self.constantSpaces=True
self.spaceBetweenRuns=None
self.rareJob=False
#jsou v polozce dnu vsechny dny (*) ?
if(self.day=="*"):
self._allDays=True
else:
self._allDays=False
#jsou v polozce dnu v tydnu vsechny dny v tydnu (*) ?
if(self.dayOfWeek=="*"):
self._allDaysOfWeek=True
else:
self._allDaysOfWeek=False
#jsou v polozce dny nasobky ? (*/2)
if re.match('\*/[0-9]+', self.day):
self._multipleDays=True
else:
self._multipleDays=False
#vytvori aktualni cas
self.nextTime=datetime.datetime.now()
#nastavi sekundy a microsekundy na 0
self.nextTime=self.nextTime.replace(second=0,microsecond=0)
#vytvorim si startovni cas, pro vypocet prvni iterace
self.startTime=self.nextTime
#vyresetuju pristi cas na minimum (1. ledna 00:00)
self.nextTime=self.nextTime.replace(minute=0,hour=0,day=1,month=1)
#inicializace
self._makeSets(self.minute, self.hour, self.day, self.month, self.dayOfWeek)
self._isRare(includeRareJobs)
self._makeGens()
self._setFirstTime()
def _isRare(self, includeRareJobs):
if len(self.monthSet) < 6 and includeRareJobs:
self.monthSet=list(range(1,13))
def _makeMinuteSet(self,minute):
""" creates iterable sets filled with minutes """
#v mnozine budou vsechny minuty [0,1,2,...,59]
if minute=="*":
minuteSet=list(range(0,60))
#v mnozine bude jedno konkretni cislo [5]
elif re.match('^[0-9]+$', minute):
minuteSet=[int(minute)]
#v mnozine bude seznam cisel [0,1,15,25]
elif re.match('^([0-9]+,)+[0-9]+$', minute):
minuteSet=sorted(list(set(map(int,minute.split(',')))))
#v mnozine bude rozsah cisel [10,11,12,13,14,15]
elif re.match('^[0-9]+-[0-9]+$', minute):
fromTo=list(map(int,minute.split('-')))
minuteSet=list(range(fromTo[0],fromTo[1]+1))
#v mnozine budou cisla odpovidajici napr. "kazdych 5" = [0,5,10,...,55]
elif re.match('\*/[0-9]+', minute):
#inicializuju prazdny list
minuteSet=[]
#rozsekam zapis */5 na casti
line=minute.split('/')
#vyberu jen cast s cislem (jak casto, se to bude dit)
howOften=int(line[1])
#vytvorim si list s minutami od 0 do 59
allMinutes=list(range(0,60))
#projedu vsechny minuty a minuty, ktere splnuji kriteria pridam do vysledne mnoziny minut
for i in allMinutes:
if i%howOften == 0:
minuteSet.append(i)
#rozsah a modulo, napr: 0-20/5
elif re.match('^[0-9]+-[0-9]+/[0-9]+$', minute):
minuteSet=[]
line=minute.split("/")
howOften=int(line[1])
fromTo=list(map(int,line[0].split('-')))
allMinutes=list(range(fromTo[0],fromTo[1]+1))
for i in allMinutes:
if i%howOften == 0:
minuteSet.append(i)
#kombinace rozsahu: 10-15,20-15 nebo kombinace rozsahu a vyctu: 1,10-15,17,19
elif re.match('^([0-9]+(-[0-9]+)?)(,([0-9]+(-[0-9]+)?))*$',minute):
minuteSet=set()
line=minute.split(",")
for part in line:
if re.match('^[0-9]+-[0-9]+$', part):
fromTo=list(map(int,part.split('-')))
subRange=list(range(fromTo[0],fromTo[1]+1))
for i in subRange:
minuteSet.add(i)
else:
minuteSet.add(int(part))
minuteSet=sorted(list(minuteSet))
return minuteSet
def _makeDayOfWeekSet(self,dayOfWeek):
#v mnozine budou vsechny dny v tydnu [0,...,6]
if dayOfWeek=="*":
dayOfWeekSet=list(range(0,7))
#v mnozine bude jedno konkretni cislo [5]
elif re.match('^[0-7]$', dayOfWeek):
dayOfWeekSet=[int(dayOfWeek)]
#v mnozine bude seznam cisel [0,1,4,6]
elif re.match('^([0-7],)+[0-7]$', dayOfWeek):
dayOfWeekSet=sorted(list(set(map(int,dayOfWeek.split(',')))))
#v mnozine bude rozsah dnu v tydnu [0,1,2]
elif re.match('^[0-7]-[0-7]$', dayOfWeek):
fromTo=list(map(int,dayOfWeek.split('-')))
dayOfWeekSet=list(range(fromTo[0],fromTo[1]+1))
#v mnozine budou cisla odpovidajici napr. "kazdych 5" = [0,5,10,...,55]
elif re.match('\*/[0-9]+', dayOfWeek):
#inicializuju prazdny list
dayOfWeekSet=[]
#rozsekam zapis */5 na casti
line=dayOfWeek.split('/')
#vyberu jen cast s cislem (jak casto, se to bude dit)
howOften=int(line[1])
#vytvorim si list s dny v tydnu od 0 do 6
allDaysOfWeek=list(range(0,7))
#projedu vsechny dny v tydnu a dny v tydnu, ktere splnuji kriteria pridam do vysledne mnoziny dnu v tydnu
for i in allDaysOfWeek:
if i%howOften == 0:
dayOfWeekSet.append(i)
#rozsah a modulo, napr: 0-6/2
elif re.match('^[0-9]+-[0-9]+/[0-9]+$', dayOfWeek):
dayOfWeekSet=[]
line=dayOfWeek.split("/")
howOften=int(line[1])
fromTo=list(map(int,line[0].split('-')))
allMinutes=list(range(fromTo[0],fromTo[1]+1))
for i in allMinutes:
if i%howOften == 0:
dayOfWeekSet.append(i)
#kombinace rozsahu: 10-15,20-15 nebo kombinace rozsahu a vyctu: 1,10-15,17,19
elif re.match('^([0-9]+(-[0-9]+)?)(,([0-9]+(-[0-9]+)?))*$',dayOfWeek):
dayOfWeekSet=set()
line=dayOfWeek.split(",")
for part in line:
if re.match('^[0-9]+-[0-9]+$', part):
fromTo=list(map(int,part.split('-')))
subRange=list(range(fromTo[0],fromTo[1]+1))
for i in subRange:
dayOfWeekSet.add(i)
else:
dayOfWeekSet.add(int(part))
dayOfWeekSet=sorted(list(dayOfWeekSet))
return dayOfWeekSet
def _makeHourSet(self,hour):
#v mnozine budou vsechny hodiny [0,1,2,...,23]
if hour=="*":
hourSet=list(range(0,24))
#v mnozine bude jedno konkretni cislo [5]
elif re.match('^[0-9]+$', hour):
hourSet=[int(hour)]
#v mnozine bude seznam cisel [0,1,15,22]
elif re.match('^([0-9]+,)+[0-9]+$', hour):
hourSet=sorted(list(set(map(int,hour.split(',')))))
#v mnozine bude rozsah cisel [10,11,12,13,14,15]
elif re.match('^[0-9]+-[0-9]+$', hour):
fromTo=list(map(int,hour.split('-')))
hourSet=list(range(fromTo[0],fromTo[1]+1))
#v mnozine budou cisla odpovidajici napr. "kazdych 5" = [0,5,10,...,55]
elif re.match('\*/[0-9]+', hour):
#inicializuju prazdny list
hourSet=[]
#rozsekam zapis */5 na casti
line=hour.split('/')
#vyberu jen cast s cislem (jak casto, se to bude dit)
howOften=int(line[1])
#vytvorim si list s hodinami od 0 do 23
allHours=list(range(0,24))
#projedu vsechny hodiny a hodiny, ktere splnuji kriteria pridam do vysledne mnoziny hodin
for i in allHours:
if i%howOften == 0:
hourSet.append(i)
#rozsah a modulo, napr: 0-20/5
elif re.match('^[0-9]+-[0-9]+/[0-9]+$', hour):
hourSet=[]
line=hour.split("/")
howOften=int(line[1])
fromTo=list(map(int,line[0].split('-')))
allMinutes=list(range(fromTo[0],fromTo[1]+1))
for i in allMinutes:
if i%howOften == 0:
hourSet.append(i)
#kombinace rozsahu: 10-15,20-15 nebo kombinace rozsahu a vyctu: 1,10-15,17,19
elif re.match('^([0-9]+(-[0-9]+)?)(,([0-9]+(-[0-9]+)?))*$',hour):
hourSet=set()
line=hour.split(",")
for part in line:
if re.match('^[0-9]+-[0-9]+$', part):
fromTo=list(map(int,part.split('-')))
subRange=list(range(fromTo[0],fromTo[1]+1))
for i in subRange:
hourSet.add(i)
else:
hourSet.add(int(part))
hourSet=sorted(list(hourSet))
return hourSet
def _makeDaySet(self,day):
#v mnozine budou vsechny dny [1,2,...,31] nebo [1,...,28], atd...
if day=="*":
daySet=[]
#v mnozine bude jedno konkretni cislo [5]
elif re.match('^[0-9]+$', day):
daySet=[int(day)]
#v mnozine bude seznam cisel [0,1,15,25]
elif re.match('^([0-9]+,)+[0-9]+$', day):
daySet=sorted(list(set(map(int,day.split(',')))))
#v mnozine bude rozsah cisel [10,11,12,13,14,15]
elif re.match('^[0-9]+-[0-9]+$', day):
fromTo=list(map(int,day.split('-')))
daySet=list(range(fromTo[0],fromTo[1]+1))
#v mnozine budou cisla odpovidajici napr. "kazdych 5" = [0,5,10,...,55]
#dodela se pozdeji v zavislosti na danem mesici
elif re.match('\*/[0-9]+', day):
daySet=[]
#rozsah a modulo, napr: 0-20/5
elif re.match('^[0-9]+-[0-9]+/[0-9]+$', day):
daySet=[]
line=day.split("/")
howOften=int(line[1])
fromTo=list(map(int,line[0].split('-')))
allMinutes=list(range(fromTo[0],fromTo[1]+1))
for i in allMinutes:
if i%howOften == 1:
daySet.append(i)
#kombinace rozsahu: 10-15,20-15 nebo kombinace rozsahu a vyctu: 1,10-15,17,19
elif re.match('^([0-9]+(-[0-9]+)?)(,([0-9]+(-[0-9]+)?))*$',day):
daySet=set()
line=day.split(",")
for part in line:
if re.match('^[0-9]+-[0-9]+$', part):
fromTo=list(map(int,part.split('-')))
subRange=list(range(fromTo[0],fromTo[1]+1))
for i in subRange:
daySet.add(i)
else:
daySet.add(int(part))
daySet=sorted(list(daySet))
return daySet
def _makeDaySetAfter(self,day):
#inicializuju prazdny list
daySet=[]
#rozsekam zapis */5 na casti
line=day.split('/')
#vyberu jen cast s cislem (jak casto, se to bude dit)
howOften=int(line[1])
#vytvorim si list s dny, podle aktualniho mesice a roku
self._adjustDaySetByMonth()
allDays=self.daySet
#projedu vsechny dny a dny, ktere splnuji kriteria pridam do vysledne mnoziny dnu
for i in allDays:
if i%howOften == 1:
daySet.append(i)
return daySet
def _makeMonthSet(self,month):
#v mnozine budou vsechny mesice [1,2,...,12]
if month=="*":
monthSet=list(range(1,13))
#v mnozine bude jedno konkretni cislo [5]
elif re.match('^[0-9]+$', month):
monthSet=[int(month)]
#v mnozine bude seznam cisel [0,1,15,25]
elif re.match('^([0-9]+,)+[0-9]+$', month):
monthSet=sorted(list(set(map(int,month.split(',')))))
#v mnozine bude rozsah cisel [10,11,12,13,14,15]
elif re.match('^[0-9]+-[0-9]+$', month):
fromTo=list(map(int,month.split('-')))
monthSet=list(range(fromTo[0],fromTo[1]+1))
#v mnozine budou cisla odpovidajici napr. "kazdych 5" = [0,5,10,...,55]
elif re.match('\*/[0-9]+', month):
#inicializuju prazdny list
monthSet=[]
#rozsekam zapis */5 na casti
line=month.split('/')
#vyberu jen cast s cislem (jak casto, se to bude dit)
howOften=int(line[1])
#vytvorim si list s mesici od 1 do 12
allMonths=list(range(1,13))
#projedu vsechny mesice a mesice, ktere splnuji kriteria pridam do vysledne mnoziny mesicu
for i in allMonths:
if i%howOften == 1:
monthSet.append(i)
#rozsah a modulo, napr: 0-20/5
elif re.match('^[0-9]+-[0-9]+/[0-9]+$', month):
monthSet=[]
line=month.split("/")
howOften=int(line[1])
fromTo=list(map(int,line[0].split('-')))
allMinutes=list(range(fromTo[0],fromTo[1]+1))
for i in allMinutes:
if i%howOften == 1:
monthSet.append(i)
#kombinace rozsahu: 10-15,20-15 nebo kombinace rozsahu a vyctu: 1,10-15,17,19
elif re.match('^([0-9]+(-[0-9]+)?)(,([0-9]+(-[0-9]+)?))*$',month):
monthSet=set()
line=month.split(",")
for part in line:
if re.match('^[0-9]+-[0-9]+$', part):
fromTo=list(map(int,part.split('-')))
subRange=list(range(fromTo[0],fromTo[1]+1))
for i in subRange:
monthSet.add(i)
else:
monthSet.add(int(part))
monthSet=sorted(list(monthSet))
return monthSet
def _adjustDaySet(self,month,year):
if(month == 1 or month == 3 or month == 5 or month == 7 or month == 8 or month == 10 or month == 12):
#mesic ma 31 dni
daySet=list(range(1,32))
elif (month == 4 or month == 6 or month == 9 or month == 11):
#mesic ma 30 dni
daySet=list(range(1,31))
else:
#je to unor a prestupny rok = 29 dni
if (calendar.isleap(year)):
daySet=list(range(1,30))
#je to unor a neprestupny rok = 28 dni
else:
daySet=list(range(1,29))
return daySet
def _generateMinutes(self,minuteSet):
for i in minuteSet:
yield i
def _generateDayOfWeek(self,dayOfWeekSet):
for i in dayOfWeekSet:
yield i
def _generateHours(self,hourSet):
for i in hourSet:
yield i
def _generateDays(self,daySet):
for i in daySet:
yield i
def _generateMonths(self, monthSet):
for i in monthSet:
yield i
def _nextMinute(self):
self.nextTime=self.nextTime.replace(minute=next(self.minutes))
def _nextHour(self):
self.nextTime=self.nextTime.replace(hour=next(self.hours))
def _adjustDaySetByMonth(self, save=True):
#zkopiruju si generator mesicu
self.months, prevMonths = tee(self.months)
#zkusim se podivat na dalsi mesic v mnozine
try:
nextYear=self.nextTime.year
nextMonth=next(self.months)
#mnozina dosla
except StopIteration:
#vyresetuju generator
self.months=self._generateMonths(self.monthSet)
#nactu dalsi mesic
nextMonth=next(self.months)
#zvysim rok
nextYear=nextYear+1
#vratim generator do stavu pred posunutim
self.months=prevMonths
#upravim mnozinu dni pro nasledujici mesic
#bud ji ulozim do objektu nebo ji jen vratim
if save:
self.daySet=self._adjustDaySet(nextMonth, nextYear)
else:
return self._adjustDaySet(nextMonth, nextYear)
def _nextDay(self):
#den i den v tydnu jsou vsechny (*)
if self._allDays and self._allDaysOfWeek:
#posunu den podle vytvorene mnoziny dni
self.nextTime=self.nextTime.replace(day=next(self.days))
#dny jsou vsechny (*), dny v tydnu nejsou vsechny (konkretni den, vycet, rozsah)
elif self._allDays and not self._allDaysOfWeek:
#posunu den v tydnu podle vytvorene mnoziny dnu v tydnu
# => posun datum o +1 den dokud nebude splnovat aktualni den v tydnu z mnoziny
found=False
while True:
if found:
break
try:
self.nextTime=self.nextTime.replace(day=next(self.days))
self.daysOfWeek=self._generateDayOfWeek(self.dayOfWeekSet)
for dayOfWeek in self.daysOfWeek:
dayOfWeek=self._cron2python(dayOfWeek)
if self.nextTime.weekday() == dayOfWeek:
found=True
break
except StopIteration:
#uprav daySet pro nasledujici mesic, pokud je zadan kazdy den
if self._allDays:
self._adjustDaySetByMonth()
#nebo pokud byly zadany nasobky dnu
elif self._multipleDays:
self.daySet=self._makeDaySetAfter(self.day)
#vyresetuj mnozinu dni
self.days=self._generateDays(self.daySet)
#posun o den
self.nextTime=self.nextTime.replace(day=next(self.days))
#zkus posunout mesic
try:
self._nextMonth()
#jsme na konci mnoziny mesicu, bude se posouvat i rok
except StopIteration:
#vyresetuj mnozinu mesicu
self.months=self._generateMonths(self.monthSet)
#posun mesic
self._nextMonth()
#posun rok (mnozina roku neni, nemusi se nic hlidat)
self._nextYear()
self.daysOfWeek=self._generateDayOfWeek(self.dayOfWeekSet)
for dayOfWeek in self.daysOfWeek:
dayOfWeek=self._cron2python(dayOfWeek)
if self.nextTime.weekday() == dayOfWeek:
found=True
break
self._dateUsed=True
#dny nejsou vsechny (konkretni den, vycet, rozsah), dny v tydnu jsou vsechny (*)
elif not self._allDays and self._allDaysOfWeek:
#posunu den podle vytvorene mnoziny dni
self.nextTime=self.nextTime.replace(day=next(self.days))
#dny nejsou vsechny (konkretni den, vycet, rozsah) ani dny v tydnu nejsou vsechny (konkretni den, vycet, rozsah)
else:
#posunu oboje dokud nebude splnena aspon jedna podminka
#napr: 10.den v mesici a utery => pristi iterace bude v 10.den v mesici NEBO v utery (utery nemusi byt 10. den v mesici)
#raise NotImplementedError
found=False
while True:
if found:
break
try:
self.nextTime=self.nextTime.replace(day=next(self._allDaysGen))
#vyresetuj generator dnu v tydnu
self.daysOfWeek=self._generateDayOfWeek(self.dayOfWeekSet)
#projdi dny v tydnu a hledej shodu
for dayOfWeek in self.daysOfWeek:
dayOfWeek=self._cron2python(dayOfWeek)
if self.nextTime.weekday() == dayOfWeek:
found=True
break
#vyresetuj generator dnu v mesici
self.days=self._generateDays(self.daySet)
#projdi dny v mesici a hledej shodu
for day in self.days:
if self.nextTime.day == day:
found=True
break
except StopIteration:
#vytvor pomocnou mnozinu vsech dni pro pristi mesic
self._allDaySet=self._adjustDaySetByMonth(False)
#vyresetuj pomocnou mnozinu vsech dni
self._allDaysGen=self._generateDays(self._allDaySet)
#posun o den
self.nextTime=self.nextTime.replace(day=next(self._allDaysGen))
#zkus posunout mesic
try:
self._nextMonth()
#jsme na konci mnoziny mesicu, bude se posouvat i rok
except StopIteration:
#vyresetuj mnozinu mesicu
self.months=self._generateMonths(self.monthSet)
#posun mesic
self._nextMonth()
#posun rok (mnozina roku neni, nemusi se nic hlidat)
self._nextYear()
#vyresetuj generator dnu v tydnu
self.daysOfWeek=self._generateDayOfWeek(self.dayOfWeekSet)
#projdi dny v tydnu a hledej shodu
for dayOfWeek in self.daysOfWeek:
dayOfWeek=self._cron2python(dayOfWeek)
if self.nextTime.weekday() == dayOfWeek:
found=True
break
#vyresetuj generator dnu v mesici
self.days=self._generateDays(self.daySet)
#projdi dny v mesici a hledej shodu
for day in self.days:
if self.nextTime.day == day:
found=True
break
self._dateUsed=True
def _nextMonth(self):
try:
self.nextTime=self.nextTime.replace(month=next(self.months))
return True
except ValueError:
return False
def _nextYear(self):
currYear=self.nextTime.year
self.nextTime=self.nextTime.replace(year=currYear+1)
def _cron2python(self, dayOfWeek):
return (dayOfWeek+6)%7
def _makeSets(self, minute, hour, day, month, dayOfWeek):
#vytvori mnoziny
self.minuteSet=self._makeMinuteSet(minute)
self.hourSet=self._makeHourSet(hour)
self.monthSet=self._makeMonthSet(month)
self.daySet=self._makeDaySet(day)
self.dayOfWeekSet=self._makeDayOfWeekSet(dayOfWeek)
def _makeGens(self):
#vytvori generatory z mnozin
self.minutes=self._generateMinutes(self.minuteSet)
self.hours=self._generateHours(self.hourSet)
self.days=self._generateDays(self.daySet)
self.months=self._generateMonths(self.monthSet)
self.daysOfWeek=self._generateDayOfWeek(self.dayOfWeekSet)
#vytvoreni pomocne mnoziny vsech dni
if not self._allDays and not self._allDaysOfWeek:
self._allDaySet=self._adjustDaySetByMonth(False)
self._allDaysGen=self._generateDays(self._allDaySet)
#doupraveni mnoziny dni pro zapisy "*/5" atd. a vytvoreni prislusneho generatoru
if self._multipleDays:
self.daySet=self._makeDaySetAfter(self.day)
self.days=self._generateDays(self.daySet)
#douprave mnoziny dni pro zapisy "*" a vytvoreni prislusneho generatoru
if self._allDays:
#vytvor mnozinu dni podle nasledujiciho mesice a roku
self._adjustDaySetByMonth()
#vytvor generator
self.days=self._generateDays(self.daySet)
def _setFirstTime(self):
self._dateUsed=False
#inicializace generatoru
self._nextMinute()
self._nextHour()
self._nextMonth()
self._nextDay()
while ((self.nextTime<=self.startTime)):
self._dateUsed=False
#posun cas
self._predictNext()
self._dateUsed=False
def _predictNext(self):
try:
#zkus posunout minutu dal
self._nextMinute()
except StopIteration:
#dorazilo se na konec mnoziny, bude se posouvit i hodina
#vyresetuj generator minut
self.minutes=self._generateMinutes(self.minuteSet)
#posun opet minutu
self._nextMinute()
#zkus posunout i hodinu
try:
self._nextHour()
#jsme na konci mnoziny hodin, bude se posouvat i den
except StopIteration:
#vyresetuj mnozinu hodin
self.hours=self._generateHours(self.hourSet)
#posun hodinu
self._nextHour()
#zkus posunout den
try:
self._nextDay()
#jsme na konci mnoziny dni, bude se posouvat i mesic
except StopIteration:
#uprav daySet pro nasledujici mesic, pokud je zadan kazdy den
if self._allDays:
self._adjustDaySetByMonth()
#nebo pokud byly zadany nasobky dnu
elif self._multipleDays:
self.daySet=self._makeDaySetAfter(self.day)
#vyresetuj mnozinu dni
self.days=self._generateDays(self.daySet)
#posun den
self._nextDay()
#zkus posunout mesic
#pokud je den fixni, napr. 30 a mesic nema tolik dni (napr. unor), preskoc ho a zkus dalsi
try:
while not self._nextMonth():
pass
#jsme na konci mnoziny mesicu, bude se posouvat i rok
except StopIteration:
#vyresetuj mnozinu mesicu
self.months=self._generateMonths(self.monthSet)
#posun mesic
self._nextMonth()
#posun rok (mnozina roku neni, nemusi se nic hlidat)
self._nextYear()
def _printTime(self):
print(self.nextTime.strftime("%H:%M:%S %d.%m.%Y"))
def _printInterval(self,timeFrom,timeTo):
print(timeFrom.strftime("%H:%M:%S %d.%m.%Y")+" ---> "+timeTo.strftime("%H:%M:%S %d.%m.%Y"))
def _addTime(self,startTime,length):
lengthTimedelta=timedelta(seconds=length)
endTime=startTime+lengthTimedelta
return endTime
def iterate(self,n,cronJobList):
for _ in range(n):
endTime=self._addTime(self.nextTime, self.length)
cronJobList.append(CCronJob(self.nextTime,endTime))
#self._printInterval(self.nextTime, endTime)
self._predictNext()
self._dateUsed=False
return cronJobList
def iterateUntil(self,toDate,cronJobList):
prevDuration=None
while self.nextTime<=toDate:
prevTime=self.nextTime
endTime=self._addTime(self.nextTime, self.length)
cronJobList.append(CCronJob(self.nextTime,endTime))
#self._printInterval(self.nextTime, endTime)
self._predictNext()
duration = self.nextTime - prevTime
self.spaceBetweenRuns=duration
if prevDuration is not None and prevDuration != duration:
self.constantSpaces=False
self.spaceBetweenRuns=None
prevDuration = duration
self._dateUsed=False
return cronJobList
def test(self,name, n=100):
#otevri soubor pro zapis vystupu predikce cronu
file=open("/home/petr/git/Cron Analyzer/test/"+"output"+str(name),"w+")
for _ in range(n):
file.write(self.nextTime.strftime("%Y-%m-%d %H:%M:%S")+"\n")
self._predictNext()
self._dateUsed=False
file.close()
def getAllDays(self):
return self._allDays
def getAllDaysOfWeek(self):
return self._allDaysOfWeek
|
unknown
|
codeparrot/codeparrot-clean
| ||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright 2012, Seth Vidal <skvidal@fedoraproject.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
''' Create inventory hosts and groups in the memory inventory'''
### We need to be able to modify the inventory
BYPASS_HOST_LOOP = True
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
if self._connection_info.check_mode:
return dict(skipped=True, msg='check mode not supported for this module')
# Parse out any hostname:port patterns
new_name = self._task.args.get('name', self._task.args.get('hostname', None))
#vv("creating host via 'add_host': hostname=%s" % new_name)
if ":" in new_name:
new_name, new_port = new_name.split(":")
self._task.args['ansible_ssh_port'] = new_port
groups = self._task.args.get('groupname', self._task.args.get('groups', self._task.args.get('group', '')))
# add it to the group if that was specified
new_groups = []
if groups:
for group_name in groups.split(","):
if group_name not in new_groups:
new_groups.append(group_name.strip())
# Add any variables to the new_host
host_vars = dict()
for k in self._task.args.keys():
if not k in [ 'name', 'hostname', 'groupname', 'groups' ]:
host_vars[k] = self._task.args[k]
return dict(changed=True, add_host=dict(host_name=new_name, groups=new_groups, host_vars=host_vars))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Regroup variables for deprecated features.
To keep the OpenERP server backward compatible with older modules, some
additional code is needed throughout the core library. This module keeps
track of those specific measures by providing variables that can be unset
by the user to check if her code is future proof.
"""
# If True, the Python modules inside the openerp namespace are made available
# without the 'openerp.' prefix. E.g. openerp.osv.osv and osv.osv refer to the
# same module.
# Introduced around 2011.02.
open_openerp_namespace = True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Corrections & modifications by Noviat nv/sa, (http://www.noviat.be):
# - VAT listing based upon year in stead of fiscal year
# - sql query adapted to select only 'tax-out' move lines
# - extra button to print readable PDF report
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
from openerp.tools.translate import _
from openerp.osv import fields, osv
from openerp.report import report_sxw
class vat_listing_clients(osv.osv_memory):
_name = 'vat.listing.clients'
_columns = {
'name': fields.char('Client Name'),
'vat': fields.char('VAT'),
'turnover': fields.float('Base Amount'),
'vat_amount': fields.float('VAT Amount'),
}
class partner_vat(osv.osv_memory):
""" Vat Listing """
_name = "partner.vat"
def get_partner(self, cr, uid, ids, context=None):
context = dict(context or {})
obj_period = self.pool.get('account.period')
obj_partner = self.pool.get('res.partner')
obj_vat_lclient = self.pool.get('vat.listing.clients')
obj_model_data = self.pool.get('ir.model.data')
obj_module = self.pool.get('ir.module.module')
data = self.read(cr, uid, ids)[0]
year = data['year']
date_start = year + '-01-01'
date_stop = year + '-12-31'
if context.get('company_id', False):
company_id = context['company_id']
else:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
period_ids = obj_period.search(cr, uid, [('date_start' ,'>=', date_start), ('date_stop','<=',date_stop), ('company_id','=',company_id)])
if not period_ids:
raise osv.except_osv(_('Insufficient Data!'), _('No data for the selected year.'))
partners = []
partner_ids = obj_partner.search(cr, uid, [('vat_subjected', '!=', False), ('vat','ilike','BE%')], context=context)
if not partner_ids:
raise osv.except_osv(_('Error'),_('No Belgian contact with a VAT number in your database.'))
cr.execute("""SELECT sub1.partner_id, sub1.name, sub1.vat, sub1.turnover, sub2.vat_amount
FROM (SELECT l.partner_id, p.name, p.vat, SUM(CASE WHEN c.code ='49' THEN -l.tax_amount ELSE l.tax_amount END) as turnover
FROM account_move_line l
LEFT JOIN res_partner p ON l.partner_id = p.id
LEFT JOIN account_tax_code c ON l.tax_code_id = c.id
WHERE c.code IN ('00','01','02','03','45','49')
AND l.partner_id IN %s
AND l.period_id IN %s
GROUP BY l.partner_id, p.name, p.vat) AS sub1
LEFT JOIN (SELECT l2.partner_id, SUM(CASE WHEN c2.code ='64' THEN -l2.tax_amount ELSE l2.tax_amount END) as vat_amount
FROM account_move_line l2
LEFT JOIN account_tax_code c2 ON l2.tax_code_id = c2.id
WHERE c2.code IN ('54','64')
AND l2.partner_id IN %s
AND l2.period_id IN %s
GROUP BY l2.partner_id) AS sub2 ON sub1.partner_id = sub2.partner_id
""",(tuple(partner_ids),tuple(period_ids),tuple(partner_ids),tuple(period_ids)))
for record in cr.dictfetchall():
record['vat'] = record['vat'].replace(' ','').upper()
if record['turnover'] >= data['limit_amount']:
id_client = obj_vat_lclient.create(cr, uid, record, context=context)
partners.append(id_client)
if not partners:
raise osv.except_osv(_('Insufficient Data!'), _('No data found for the selected year.'))
context.update({'partner_ids': partners, 'year': data['year'], 'limit_amount': data['limit_amount']})
model_data_ids = obj_model_data.search(cr, uid, [('model','=','ir.ui.view'), ('name','=','view_vat_listing')])
resource_id = obj_model_data.read(cr, uid, model_data_ids, fields=['res_id'])[0]['res_id']
return {
'name': _('Vat Listing'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'partner.vat.list',
'views': [(resource_id,'form')],
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
}
_columns = {
'year': fields.char('Year', size=4, required=True),
'limit_amount': fields.integer('Limit Amount', required=True),
}
_defaults={
'year': lambda *a: str(int(time.strftime('%Y'))-1),
'limit_amount': 250,
}
class partner_vat_list(osv.osv_memory):
""" Partner Vat Listing """
_name = "partner.vat.list"
_columns = {
'partner_ids': fields.many2many('vat.listing.clients', 'vat_partner_rel', 'vat_id', 'partner_id', 'Clients', help='You can remove clients/partners which you do not want to show in xml file'),
'name': fields.char('File Name'),
'file_save' : fields.binary('Save File', readonly=True),
'comments': fields.text('Comments'),
}
def _get_partners(self, cr, uid, context=None):
return context.get('partner_ids', [])
_defaults={
'partner_ids': _get_partners,
}
def _get_datas(self, cr, uid, ids, context=None):
obj_vat_lclient = self.pool.get('vat.listing.clients')
datas = []
data = self.read(cr, uid, ids)[0]
for partner in data['partner_ids']:
if isinstance(partner, list) and partner:
datas.append(partner[2])
else:
client_data = obj_vat_lclient.read(cr, uid, partner, context=context)
datas.append(client_data)
client_datas = []
seq = 0
sum_tax = 0.00
sum_turnover = 0.00
amount_data = {}
for line in datas:
if not line:
continue
seq += 1
sum_tax += line['vat_amount']
sum_turnover += line['turnover']
vat = line['vat'].replace(' ','').upper()
amount_data ={
'seq': str(seq),
'vat': vat,
'only_vat': vat[2:],
'turnover': '%.2f' %line['turnover'],
'vat_amount': '%.2f' %line['vat_amount'],
'sum_tax': '%.2f' %sum_tax,
'sum_turnover': '%.2f' %sum_turnover,
'partner_name': line['name'],
}
client_datas += [amount_data]
return client_datas
def create_xml(self, cr, uid, ids, context=None):
obj_sequence = self.pool.get('ir.sequence')
obj_users = self.pool.get('res.users')
obj_partner = self.pool.get('res.partner')
obj_model_data = self.pool.get('ir.model.data')
seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
obj_cmpny = obj_users.browse(cr, uid, uid, context=context).company_id
company_vat = obj_cmpny.partner_id.vat
if not company_vat:
raise osv.except_osv(_('Insufficient Data!'),_('No VAT number associated with the company.'))
company_vat = company_vat.replace(' ','').upper()
SenderId = company_vat[2:]
issued_by = company_vat[:2]
seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
dnum = company_vat[2:] + seq_declarantnum[-4:]
street = city = country = ''
addr = obj_partner.address_get(cr, uid, [obj_cmpny.partner_id.id], ['invoice'])
if addr.get('invoice',False):
ads = obj_partner.browse(cr, uid, [addr['invoice']], context=context)[0]
phone = ads.phone and ads.phone.replace(' ','') or ''
email = ads.email or ''
name = ads.name or ''
city = ads.city or ''
zip = obj_partner.browse(cr, uid, ads.id, context=context).zip or ''
if not city:
city = ''
if ads.street:
street = ads.street
if ads.street2:
street += ' ' + ads.street2
if ads.country_id:
country = ads.country_id.code
data = self.read(cr, uid, ids)[0]
comp_name = obj_cmpny.name
if not email:
raise osv.except_osv(_('Insufficient Data!'),_('No email address associated with the company.'))
if not phone:
raise osv.except_osv(_('Insufficient Data!'),_('No phone associated with the company.'))
annual_listing_data = {
'issued_by': issued_by,
'company_vat': company_vat,
'comp_name': comp_name,
'street': street,
'zip': zip,
'city': city,
'country': country,
'email': email,
'phone': phone,
'SenderId': SenderId,
'period': context['year'],
'comments': data['comments'] or ''
}
data_file = """<?xml version="1.0" encoding="ISO-8859-1"?>
<ns2:ClientListingConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/ClientListingConsignment" ClientListingsNbr="1">
<ns2:Representative>
<RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(SenderId)s</RepresentativeID>
<Name>%(comp_name)s</Name>
<Street>%(street)s</Street>
<PostCode>%(zip)s</PostCode>
<City>%(city)s</City>"""
if annual_listing_data['country']:
data_file +="\n\t\t<CountryCode>%(country)s</CountryCode>"
data_file += """
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Representative>"""
data_file = data_file % annual_listing_data
data_comp = """
<ns2:Declarant>
<VATNumber>%(SenderId)s</VATNumber>
<Name>%(comp_name)s</Name>
<Street>%(street)s</Street>
<PostCode>%(zip)s</PostCode>
<City>%(city)s</City>
<CountryCode>%(country)s</CountryCode>
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Declarant>
<ns2:Period>%(period)s</ns2:Period>
""" % annual_listing_data
# Turnover and Farmer tags are not included
client_datas = self._get_datas(cr, uid, ids, context=context)
if not client_datas:
raise osv.except_osv(_('Data Insufficient!'),_('No data available for the client.'))
data_client_info = ''
for amount_data in client_datas:
data_client_info += """
<ns2:Client SequenceNumber="%(seq)s">
<ns2:CompanyVATNumber issuedBy="BE">%(only_vat)s</ns2:CompanyVATNumber>
<ns2:TurnOver>%(turnover)s</ns2:TurnOver>
<ns2:VATAmount>%(vat_amount)s</ns2:VATAmount>
</ns2:Client>""" % amount_data
amount_data_begin = client_datas[-1]
amount_data_begin.update({'dnum':dnum})
data_begin = """
<ns2:ClientListing SequenceNumber="1" ClientsNbr="%(seq)s" DeclarantReference="%(dnum)s"
TurnOverSum="%(sum_turnover)s" VATAmountSum="%(sum_tax)s">
""" % amount_data_begin
data_end = """
<ns2:Comment>%(comments)s</ns2:Comment>
</ns2:ClientListing>
</ns2:ClientListingConsignment>
""" % annual_listing_data
data_file += data_begin + data_comp + data_client_info + data_end
file_save = base64.encodestring(data_file.encode('utf8'))
self.write(cr, uid, ids, {'file_save':file_save, 'name':'vat_list.xml'}, context=context)
model_data_ids = obj_model_data.search(cr, uid, [('model','=','ir.ui.view'), ('name','=','view_vat_listing_result')])
resource_id = obj_model_data.read(cr, uid, model_data_ids, fields=['res_id'])[0]['res_id']
return {
'name': _('XML File has been Created'),
'res_id': ids[0],
'view_type': 'form',
'view_mode': 'form',
'res_model': 'partner.vat.list',
'views': [(resource_id,'form')],
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
}
def print_vatlist(self, cr, uid, ids, context=None):
if context is None:
context = {}
datas = {'ids': []}
datas['model'] = 'res.company'
datas['year'] = context['year']
datas['limit_amount'] = context['limit_amount']
datas['client_datas'] = self._get_datas(cr, uid, ids, context=context)
if not datas['client_datas']:
raise osv.except_osv(_('Error!'), _('No record to print.'))
return self.pool['report'].get_action(
cr, uid, [], 'l10n_be.report_l10nvatpartnerlisting', data=datas, context=context
)
class partner_vat_listing_print(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(partner_vat_listing_print, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
})
def set_context(self, objects, data, ids, report_type=None):
client_datas = data['client_datas']
self.localcontext.update( {
'year': data['year'],
'sum_turnover': client_datas[-1]['sum_turnover'],
'sum_tax': client_datas[-1]['sum_tax'],
'client_list': client_datas,
})
super(partner_vat_listing_print, self).set_context(objects, data, ids)
class wrapped_vat_listing_print(osv.AbstractModel):
_name = 'report.l10n_be.report_l10nvatpartnerlisting'
_inherit = 'report.abstract_report'
_template = 'l10n_be.report_l10nvatpartnerlisting'
_wrapped_report_class = partner_vat_listing_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
# This suite combines testing for change_streams_whole_db_mongos_passthrough,
# change_streams_whole_db_secondary_reads_passthrough, and
# change_streams_whole_db_sharded_collections_passthrough while including the change stream v2 override.
# If part of this suite fails, you can comment out individual parts or run the above tests
# to dig deeper into what is failing.
base_suite: change_streams_whole_db_passthrough
description: >-
This suite is a combination of change_streams_whole_db_mongos_passthrough,
change_streams_whole_db_secondary_reads_passthrough, and
change_streams_whole_db_sharded_collections_passthrough.
You can run any of these tests individually to debug any issues that might arise.
overrides:
- "change_streams.mongos_passthrough"
- "change_streams.secondary_reads"
- "change_streams.base_eval"
eval:
- "change_streams.secondary_reads_eval"
- "change_streams.sharded_collections_passthrough_eval"
- "change_streams.whole_db_eval"
- "change_streams.change_streams_v2_eval"
#uncomment this line if you comment out the secondary reads portion of this file
# - "change_streams.causal_consistency"
excludes:
- "change_streams.mongos_passthrough_excludes"
- "change_streams.sharded_collections_passthrough_excludes"
- "change_streams.secondary_reads_excludes"
- "change_streams.change_streams_v2_excludes"
|
unknown
|
github
|
https://github.com/mongodb/mongo
|
buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_whole_db_secondary_reads_sharded_collections_v2.yml
|
#!/bin/sh
XMALLOC_STR=""
if [ "x${enable_xmalloc}" = "x1" ] ; then
XMALLOC_STR="xmalloc:false,"
fi
export MALLOC_CONF="${XMALLOC_STR}experimental_infallible_new:true"
|
unknown
|
github
|
https://github.com/redis/redis
|
deps/jemalloc/test/integration/cpp/infallible_new_true.sh
|
/*
* Copyright (C) 2011 John Szakmeister <john@szakmeister.net>
* 2012 Philipp A. Hartmann <pah@qo.cx>
* 2016 Mantas Mikulėnas <grawity@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Credits:
* - GNOME Keyring API handling originally written by John Szakmeister
* - ported to credential helper API by Philipp A. Hartmann
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <glib.h>
#include <libsecret/secret.h>
/*
* This credential struct and API is simplified from git's credential.{h,c}
*/
struct credential {
char *protocol;
char *host;
unsigned short port;
char *path;
char *username;
char *password;
char *password_expiry_utc;
char *oauth_refresh_token;
};
#define CREDENTIAL_INIT { 0 }
typedef int (*credential_op_cb)(struct credential *);
struct credential_operation {
char *name;
credential_op_cb op;
};
#define CREDENTIAL_OP_END { NULL, NULL }
static void credential_clear(struct credential *c);
/* ----------------- Secret Service functions ----------------- */
static const SecretSchema schema = {
.name = "org.git.Password",
/* Ignore schema name during search for backwards compatibility */
.flags = SECRET_SCHEMA_DONT_MATCH_NAME,
.attributes = {
/*
* libsecret assumes attribute values are non-confidential and
* unchanging, so we can't include oauth_refresh_token or
* password_expiry_utc.
*/
{ "user", SECRET_SCHEMA_ATTRIBUTE_STRING },
{ "object", SECRET_SCHEMA_ATTRIBUTE_STRING },
{ "protocol", SECRET_SCHEMA_ATTRIBUTE_STRING },
{ "port", SECRET_SCHEMA_ATTRIBUTE_INTEGER },
{ "server", SECRET_SCHEMA_ATTRIBUTE_STRING },
{ NULL, 0 },
}
};
static char *make_label(struct credential *c)
{
if (c->port)
return g_strdup_printf("Git: %s://%s:%hu/%s",
c->protocol, c->host, c->port, c->path ? c->path : "");
else
return g_strdup_printf("Git: %s://%s/%s",
c->protocol, c->host, c->path ? c->path : "");
}
static GHashTable *make_attr_list(struct credential *c)
{
GHashTable *al = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, g_free);
if (c->username)
g_hash_table_insert(al, "user", g_strdup(c->username));
if (c->protocol)
g_hash_table_insert(al, "protocol", g_strdup(c->protocol));
if (c->host)
g_hash_table_insert(al, "server", g_strdup(c->host));
if (c->port)
g_hash_table_insert(al, "port", g_strdup_printf("%hu", c->port));
if (c->path)
g_hash_table_insert(al, "object", g_strdup(c->path));
return al;
}
static int keyring_get(struct credential *c)
{
SecretService *service = NULL;
GHashTable *attributes = NULL;
GError *error = NULL;
GList *items = NULL;
if (!c->protocol || !(c->host || c->path))
return EXIT_FAILURE;
service = secret_service_get_sync(0, NULL, &error);
if (error != NULL) {
g_critical("could not connect to Secret Service: %s", error->message);
g_error_free(error);
return EXIT_FAILURE;
}
attributes = make_attr_list(c);
items = secret_service_search_sync(service,
&schema,
attributes,
SECRET_SEARCH_LOAD_SECRETS | SECRET_SEARCH_UNLOCK,
NULL,
&error);
g_hash_table_unref(attributes);
if (error != NULL) {
g_critical("lookup failed: %s", error->message);
g_error_free(error);
return EXIT_FAILURE;
}
if (items != NULL) {
SecretItem *item;
SecretValue *secret;
const char *s;
gchar **parts;
item = items->data;
secret = secret_item_get_secret(item);
attributes = secret_item_get_attributes(item);
s = g_hash_table_lookup(attributes, "user");
if (s) {
g_free(c->username);
c->username = g_strdup(s);
}
s = secret_value_get_text(secret);
if (s) {
/*
			 * Passwords and other attributes are encoded in the following format:
* hunter2
* password_expiry_utc=1684189401
* oauth_refresh_token=xyzzy
*/
parts = g_strsplit(s, "\n", 0);
if (g_strv_length(parts) >= 1) {
g_free(c->password);
c->password = g_strdup(parts[0]);
} else {
g_free(c->password);
c->password = g_strdup("");
}
for (guint i = 1; i < g_strv_length(parts); i++) {
if (g_str_has_prefix(parts[i], "password_expiry_utc=")) {
g_free(c->password_expiry_utc);
c->password_expiry_utc = g_strdup(&parts[i][20]);
} else if (g_str_has_prefix(parts[i], "oauth_refresh_token=")) {
g_free(c->oauth_refresh_token);
c->oauth_refresh_token = g_strdup(&parts[i][20]);
}
}
g_strfreev(parts);
}
g_hash_table_unref(attributes);
secret_value_unref(secret);
g_list_free_full(items, g_object_unref);
}
return EXIT_SUCCESS;
}
static int keyring_store(struct credential *c)
{
char *label = NULL;
GHashTable *attributes = NULL;
GError *error = NULL;
GString *secret = NULL;
/*
* Sanity check that what we are storing is actually sensible.
* In particular, we can't make a URL without a protocol field.
* Without either a host or pathname (depending on the scheme),
* we have no primary key. And without a username and password,
* we are not actually storing a credential.
*/
if (!c->protocol || !(c->host || c->path) ||
!c->username || !c->password)
return EXIT_FAILURE;
label = make_label(c);
attributes = make_attr_list(c);
secret = g_string_new(c->password);
if (c->password_expiry_utc) {
g_string_append_printf(secret, "\npassword_expiry_utc=%s",
c->password_expiry_utc);
}
if (c->oauth_refresh_token) {
g_string_append_printf(secret, "\noauth_refresh_token=%s",
c->oauth_refresh_token);
}
secret_password_storev_sync(&schema,
attributes,
NULL,
label,
secret->str,
NULL,
&error);
g_string_free(secret, TRUE);
g_free(label);
g_hash_table_unref(attributes);
if (error != NULL) {
g_critical("store failed: %s", error->message);
g_error_free(error);
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
static int keyring_erase(struct credential *c)
{
GHashTable *attributes = NULL;
GError *error = NULL;
struct credential existing = CREDENTIAL_INIT;
/*
* Sanity check that we actually have something to match
* against. The input we get is a restrictive pattern,
* so technically a blank credential means "erase everything".
* But it is too easy to accidentally send this, since it is equivalent
* to empty input. So explicitly disallow it, and require that the
* pattern have some actual content to match.
*/
if (!c->protocol && !c->host && !c->path && !c->username)
return EXIT_FAILURE;
if (c->password) {
existing.host = g_strdup(c->host);
existing.path = g_strdup(c->path);
existing.port = c->port;
existing.protocol = g_strdup(c->protocol);
existing.username = g_strdup(c->username);
keyring_get(&existing);
if (existing.password && strcmp(c->password, existing.password)) {
credential_clear(&existing);
return EXIT_SUCCESS;
}
credential_clear(&existing);
}
attributes = make_attr_list(c);
secret_password_clearv_sync(&schema,
attributes,
NULL,
&error);
g_hash_table_unref(attributes);
if (error != NULL) {
g_critical("erase failed: %s", error->message);
g_error_free(error);
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
/*
* Table with helper operation callbacks, used by generic
* credential helper main function.
*/
static struct credential_operation const credential_helper_ops[] = {
{ "get", keyring_get },
{ "store", keyring_store },
{ "erase", keyring_erase },
CREDENTIAL_OP_END
};
/* ------------------ credential functions ------------------ */
static void credential_init(struct credential *c)
{
memset(c, 0, sizeof(*c));
}
static void credential_clear(struct credential *c)
{
g_free(c->protocol);
g_free(c->host);
g_free(c->path);
g_free(c->username);
g_free(c->password);
g_free(c->password_expiry_utc);
g_free(c->oauth_refresh_token);
credential_init(c);
}
static int credential_read(struct credential *c)
{
char *buf = NULL;
size_t alloc;
ssize_t line_len;
char *key;
char *value;
while ((line_len = getline(&buf, &alloc, stdin)) > 0) {
key = buf;
if (buf[line_len-1] == '\n')
buf[--line_len] = '\0';
if (!line_len)
break;
value = strchr(buf, '=');
if (!value) {
g_warning("invalid credential line: %s", key);
g_free(buf);
return -1;
}
*value++ = '\0';
if (!strcmp(key, "protocol")) {
g_free(c->protocol);
c->protocol = g_strdup(value);
} else if (!strcmp(key, "host")) {
g_free(c->host);
c->host = g_strdup(value);
value = strrchr(c->host, ':');
if (value) {
*value++ = '\0';
c->port = atoi(value);
}
} else if (!strcmp(key, "path")) {
g_free(c->path);
c->path = g_strdup(value);
} else if (!strcmp(key, "username")) {
g_free(c->username);
c->username = g_strdup(value);
} else if (!strcmp(key, "password_expiry_utc")) {
g_free(c->password_expiry_utc);
c->password_expiry_utc = g_strdup(value);
} else if (!strcmp(key, "password")) {
g_free(c->password);
c->password = g_strdup(value);
while (*value)
*value++ = '\0';
} else if (!strcmp(key, "oauth_refresh_token")) {
g_free(c->oauth_refresh_token);
c->oauth_refresh_token = g_strdup(value);
while (*value)
*value++ = '\0';
}
/*
* Ignore other lines; we don't know what they mean, but
* this future-proofs us when later versions of git do
* learn new lines, and the helpers are updated to match.
*/
}
free(buf);
return 0;
}
static void credential_write_item(FILE *fp, const char *key, const char *value)
{
if (!value)
return;
fprintf(fp, "%s=%s\n", key, value);
}
static void credential_write(const struct credential *c)
{
	/* only write fields that are set */
credential_write_item(stdout, "username", c->username);
credential_write_item(stdout, "password", c->password);
credential_write_item(stdout, "password_expiry_utc",
c->password_expiry_utc);
credential_write_item(stdout, "oauth_refresh_token",
c->oauth_refresh_token);
}
static void usage(const char *name)
{
struct credential_operation const *try_op = credential_helper_ops;
const char *basename = strrchr(name, '/');
basename = (basename) ? basename + 1 : name;
fprintf(stderr, "usage: %s <", basename);
while (try_op->name) {
fprintf(stderr, "%s", (try_op++)->name);
if (try_op->name)
fprintf(stderr, "%s", "|");
}
fprintf(stderr, "%s", ">\n");
}
int main(int argc, char *argv[])
{
int ret = EXIT_SUCCESS;
struct credential_operation const *try_op = credential_helper_ops;
struct credential cred = CREDENTIAL_INIT;
if (argc < 2 || !*argv[1]) {
usage(argv[0]);
exit(EXIT_FAILURE);
}
g_set_application_name("Git Credential Helper");
/* lookup operation callback */
while (try_op->name && strcmp(argv[1], try_op->name))
try_op++;
/* unsupported operation given -- ignore silently */
if (!try_op->name || !try_op->op)
goto out;
ret = credential_read(&cred);
if (ret)
goto out;
/* perform credential operation */
ret = (*try_op->op)(&cred);
credential_write(&cred);
out:
credential_clear(&cred);
return ret;
}
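/*
 * Illustrative sketch (not part of the upstream helper): the program is
 * driven entirely over stdin/stdout using the key=value lines parsed by
 * credential_read() and printed by credential_write(). Assuming the built
 * binary is called git-credential-libsecret, a lookup might look like:
 *
 *   $ printf 'protocol=https\nhost=example.com\nusername=alice\n\n' | \
 *         ./git-credential-libsecret get
 *   username=alice
 *   password=hunter2
 *
 * Output appears only when a matching item exists in the Secret Service;
 * password_expiry_utc and oauth_refresh_token lines are printed as well
 * when they were stored alongside the password. The host, user and
 * password values above are made up for the example.
 */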
|
c
|
github
|
https://github.com/git/git
|
contrib/credential/libsecret/git-credential-libsecret.c
|
from datetime import datetime
from datetime import timedelta
import pytest
from cfme import test_requirements
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.tier(3),
test_requirements.c_and_u,
pytest.mark.usefixtures('setup_provider'),
pytest.mark.provider([VMwareProvider],
scope='module',
required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')]),
pytest.mark.meta(blockers=[BZ(1635126, forced_streams=['5.10'])])
]
ELEMENTS = ['vm', 'host']
GRAPH_TYPE = ['hourly', 'daily']
@pytest.fixture(scope='module')
def order_data(appliance, provider, enable_candu):
    # Order gap collection data from two days back for testing
end_date = datetime.now()
start_date = end_date - timedelta(days=2)
view = navigate_to(appliance.server.zone, 'CANDUGapCollection')
view.candugapcollection.fill({'end_date': end_date,
'start_date': start_date})
view.candugapcollection.submit.click()
@pytest.mark.parametrize('graph_type', GRAPH_TYPE)
@pytest.mark.parametrize('element', ELEMENTS)
def test_gap_collection(appliance, provider, element, graph_type, order_data):
""" Test gap collection data
prerequisites:
* C&U enabled appliance
Steps:
* Navigate to Configuration > Diagnostics > Zone Gap Collection Page
* Order old data
* Navigate to VM or Host Utilization page
* Check for Hourly data
* Check for Daily data
Polarion:
assignee: nachandr
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/4h
"""
if element == 'host':
collection = appliance.collections.hosts
for test_host in provider.data['hosts']:
if not test_host.get('test_fleece', False):
continue
element = collection.instantiate(name=test_host.name, provider=provider)
elif element == 'vm':
collection = appliance.provider_based_collection(provider)
element = collection.instantiate('cu-24x7', provider)
date = datetime.now() - timedelta(days=1)
element.wait_candu_data_available(timeout=1200)
view = navigate_to(element, 'candu')
view.options.interval.fill(graph_type.capitalize())
try:
graph = getattr(view, 'vm_cpu')
except AttributeError:
graph = getattr(view.interval_type, 'host_cpu')
assert graph.is_displayed
def refresh():
provider.browser.refresh()
view = navigate_to(element, 'candu')
view.options.interval.fill(graph_type.capitalize())
    # wait, since the graph sometimes takes time to load
wait_for(lambda: len(graph.all_legends) > 0,
delay=5, timeout=600, fail_func=refresh)
# check collected data for cpu graph
view.options.calendar.fill(date)
graph_data = 0
for leg in graph.all_legends:
graph.display_legends(leg)
for data in graph.data_for_legends(leg).values():
graph_data += float(data[leg].replace(',', '').replace('%', '').split()[0])
assert graph_data > 0
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
config = Script.get_config()
knox_pid_dir = config['configurations']['knox-env']['knox_pid_dir']
knox_pid_file = format("{knox_pid_dir}/gateway.pid")
ldap_pid_file = format("{knox_pid_dir}/ldap.pid")
#if OSCheck.is_windows_family():
# knox_gateway_win_service_name = "gateway"
# knox_ldap_win_service_name = "ldap"
#else:
knox_conf_dir = '/etc/knox/conf'
# if Script.is_stack_greater_or_equal("2.2"):
knox_conf_dir = '/usr/iop/current/knox-server/conf'
# knox_pid_dir = config['configurations']['knox-env']['knox_pid_dir']
# knox_pid_file = format("{knox_pid_dir}/gateway.pid")
# ldap_pid_file = format("{knox_pid_dir}/ldap.pid")
security_enabled = config['configurations']['cluster-env']['security_enabled']
if security_enabled:
knox_keytab_path = config['configurations']['knox-env']['knox_keytab_path']
knox_principal_name = config['configurations']['knox-env']['knox_principal_name']
else:
knox_keytab_path = None
knox_principal_name = None
hostname = config['hostname'].lower()
knox_user = default("/configurations/knox-env/knox_user", "knox")
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
temp_dir = Script.get_tmp_dir()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.build.bom.bomr.version;
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Tests for {@link ArtifactVersionDependencyVersion}.
*
* @author Andy Wilkinson
*/
class ArtifactVersionDependencyVersionTests {
@Test
void parseWhenVersionIsNotAMavenVersionShouldReturnNull() {
assertThat(version("1.2.3.1")).isNull();
}
@Test
void parseWhenVersionIsAMavenVersionShouldReturnAVersion() {
assertThat(version("1.2.3")).isNotNull();
}
@Test
void isSameMajorWhenSameMajorAndMinorShouldReturnTrue() {
assertThat(version("1.10.2").isSameMajor(version("1.10.0"))).isTrue();
}
@Test
void isSameMajorWhenSameMajorShouldReturnTrue() {
assertThat(version("1.10.2").isSameMajor(version("1.9.0"))).isTrue();
}
@Test
void isSameMajorWhenDifferentMajorShouldReturnFalse() {
assertThat(version("2.0.2").isSameMajor(version("1.9.0"))).isFalse();
}
@Test
void isSameMinorWhenSameMinorShouldReturnTrue() {
assertThat(version("1.10.2").isSameMinor(version("1.10.1"))).isTrue();
}
@Test
void isSameMinorWhenDifferentMinorShouldReturnFalse() {
assertThat(version("1.10.2").isSameMinor(version("1.9.1"))).isFalse();
}
@Test
void isSnapshotForWhenSnapshotForReleaseShouldReturnTrue() {
assertThat(version("1.10.2-SNAPSHOT").isSnapshotFor(version("1.10.2"))).isTrue();
}
@Test
void isSnapshotForWhenBuildSnapshotForReleaseShouldReturnTrue() {
assertThat(version("1.10.2.BUILD-SNAPSHOT").isSnapshotFor(version("1.10.2.RELEASE"))).isTrue();
}
@Test
void isSnapshotForWhenSnapshotForReleaseCandidateShouldReturnTrue() {
assertThat(version("1.10.2-SNAPSHOT").isSnapshotFor(version("1.10.2-RC2"))).isTrue();
}
@Test
void isSnapshotForWhenBuildSnapshotForReleaseCandidateShouldReturnTrue() {
assertThat(version("1.10.2.BUILD-SNAPSHOT").isSnapshotFor(version("1.10.2.RC2"))).isTrue();
}
@Test
void isSnapshotForWhenSnapshotForMilestoneShouldReturnTrue() {
assertThat(version("1.10.2-SNAPSHOT").isSnapshotFor(version("1.10.2-M1"))).isTrue();
}
@Test
void isSnapshotForWhenBuildSnapshotForMilestoneShouldReturnTrue() {
assertThat(version("1.10.2.BUILD-SNAPSHOT").isSnapshotFor(version("1.10.2.M1"))).isTrue();
}
@Test
void isSnapshotForWhenSnapshotForDifferentReleaseShouldReturnFalse() {
assertThat(version("1.10.1-SNAPSHOT").isSnapshotFor(version("1.10.2"))).isFalse();
}
@Test
	void isSnapshotForWhenBuildSnapshotForDifferentReleaseShouldReturnFalse() {
assertThat(version("1.10.1.BUILD-SNAPSHOT").isSnapshotFor(version("1.10.2.RELEASE"))).isFalse();
}
@Test
	void isSnapshotForWhenSnapshotForDifferentReleaseCandidateShouldReturnFalse() {
assertThat(version("1.10.1-SNAPSHOT").isSnapshotFor(version("1.10.2-RC2"))).isFalse();
}
@Test
	void isSnapshotForWhenBuildSnapshotForDifferentReleaseCandidateShouldReturnFalse() {
assertThat(version("1.10.1.BUILD-SNAPSHOT").isSnapshotFor(version("1.10.2.RC2"))).isFalse();
}
@Test
	void isSnapshotForWhenSnapshotForDifferentMilestoneShouldReturnFalse() {
assertThat(version("1.10.1-SNAPSHOT").isSnapshotFor(version("1.10.2-M1"))).isFalse();
}
@Test
	void isSnapshotForWhenBuildSnapshotForDifferentMilestoneShouldReturnFalse() {
assertThat(version("1.10.1.BUILD-SNAPSHOT").isSnapshotFor(version("1.10.2.M1"))).isFalse();
}
@Test
void isSnapshotForWhenNotSnapshotShouldReturnFalse() {
assertThat(version("1.10.1-M1").isSnapshotFor(version("1.10.1"))).isFalse();
}
private ArtifactVersionDependencyVersion version(String version) {
return ArtifactVersionDependencyVersion.parse(version);
}
}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
buildSrc/src/test/java/org/springframework/boot/build/bom/bomr/version/ArtifactVersionDependencyVersionTests.java
|
from django.apps.registry import Apps
from django.db import models
from django.db.utils import DatabaseError
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder:
"""
Deal with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via migrations.
We manually handle table creation/schema updating (using schema backend)
and then have a floating model to do queries with.
If a migration is unapplied its row is removed from the table. Having
a row in the table always means a migration is applied.
"""
class Migration(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(default=now)
class Meta:
apps = Apps()
app_label = "migrations"
db_table = "django_migrations"
def __str__(self):
return "Migration %s for %s" % (self.name, self.app)
def __init__(self, connection):
self.connection = connection
@property
def migration_qs(self):
return self.Migration.objects.using(self.connection.alias)
def has_table(self):
"""Return True if the django_migrations table exists."""
return self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor())
def ensure_schema(self):
"""Ensure the table exists and has the correct schema."""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.has_table():
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
def applied_migrations(self):
"""Return a set of (app, name) of applied migrations."""
if self.has_table():
return {tuple(x) for x in self.migration_qs.values_list('app', 'name')}
else:
# If the django_migrations table doesn't exist, then no migrations
# are applied.
return set()
def record_applied(self, app, name):
"""Record that a migration was applied."""
self.ensure_schema()
self.migration_qs.create(app=app, name=name)
def record_unapplied(self, app, name):
"""Record that a migration was unapplied."""
self.ensure_schema()
self.migration_qs.filter(app=app, name=name).delete()
def flush(self):
"""Delete all migration records. Useful for testing migrations."""
self.migration_qs.all().delete()
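# Illustrative usage sketch (not part of the original module), assuming a
# configured database connection is available:
#
#   from django.db import connection
#   recorder = MigrationRecorder(connection)
#   recorder.ensure_schema()                       # creates django_migrations if missing
#   recorder.record_applied('myapp', '0001_initial')
#   ('myapp', '0001_initial') in recorder.applied_migrations()   # True
#   recorder.record_unapplied('myapp', '0001_initial')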
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import test.support
import test.script_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
def latin(s):
return s.encode('latin')
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False     # setting this True makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
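# Illustrative sketch (not part of the original test suite): a TimingWrapper is
# called exactly like the wrapped callable, and the elapsed time is read back
# afterwards, e.g.
#
#   join = TimingWrapper(p.join)
#   join(0)          # returns whatever p.join(0) returns
#   join.elapsed     # roughly how long the call blocked, in seconds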
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(100)
def test_terminate(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
p.terminate()
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
sys.stderr = open(testfn, 'w')
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
sys.stderr = open(testfn, 'w')
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason, code in (([1, 2, 3], 1), ('ignore this', 1)):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, code)
with open(testfn, 'r') as f:
self.assertEqual(f.read().rstrip(), str(reason))
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
        # the values may be in the buffer but not yet in the pipe, so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.2)
delta = time.time() - start
self.assertGreaterEqual(delta, 0.18)
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
for i in range(10):
try:
if get_value(woken) == 6:
break
except NotImplementedError:
break
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=10))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(10))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 10)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
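# Illustrative sketch (not part of the original test suite): like the plain
# lists used in test/lock_tests.py, a _DummyList is only ever used as a shared
# counter, e.g.
#
#   counter = _DummyList()
#   counter.append(True)   # increments the count; the appended value is ignored
#   len(counter)           # current count, here 1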
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
f(*args)
b.wait_for_finished()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(1000)))
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_make_pool(self):
self.assertRaises(ValueError, multiprocessing.Pool, -1)
self.assertRaises(ValueError, multiprocessing.Pool, 0)
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertLess(join.elapsed, 0.5)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with multiprocessing.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
raise AssertionError('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
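# Module-level helpers for _TestPoolWorkerErrors: raising() exercises the
# error_callback path, and unpickleable_result() returns a lambda, which
# cannot be pickled when the worker sends the result back.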
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
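# QueueManager2 registers 'get_queue' without a callable, so it can only be
# used as a client connecting to a server that actually provides the queue.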
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
# Note that xmlrpclib will deserialize the object as a list, not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
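# Empty byte string used as a sentinel: _TestConnection._echo() stops echoing
# and closes its end of the pipe when it receives it.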
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by the parent
# process immediately after the child is spawned. On Windows this
# sometimes failed on old versions because child_conn would be
# closed before the child got a chance to duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
# On Windows the client process should by now have connected,
# written data and closed the pipe handle. This causes
# ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue #14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message into the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=5)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen(1)
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
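# After sorting by (arena, start), each pair of consecutive blocks must either
# be contiguous within the same arena or the second block must start a new
# arena at offset 0; anything else means part of an arena was lost track of.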
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
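# Simple ctypes Structure used by _TestSharedCTypes to check that shared
# structured values are doubled correctly by a child process.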
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
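# Finalizers with a higher exitpriority run first; the finalizer attached to
# 'c' has no exitpriority, so _exit_function() does not run it at all (which
# is why 'c' is absent from the expected result below).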
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test of connection handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork() keeps only the running thread in the child
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_test_process, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen(4)
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
p.join(10)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.script_helper.assert_python_failure(name, sm)
self.assertEqual('', out.decode('ascii'))
self.assertIn('RuntimeError', err.decode('ascii'))
else:
rc, out, err = test.script_helper.assert_python_ok(name, sm)
self.assertEqual('123', out.decode('ascii').rstrip())
self.assertEqual('', err.decode('ascii'))
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
p.join(timeout=5)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
p.join(timeout=5)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
p.join(timeout=5)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
#
# Check that killing process does not leak named semaphores
#
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
import subprocess
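# The child creates two named semaphores and writes their names to a pipe.
# We unlink the first one ourselves, terminate the child, and then check that
# the semaphore_tracker unlinked the second one and reported both leaks on
# stderr.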
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
#
# Mixins
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
t = 0.01
while len(multiprocessing.active_children()) > 1 and t < 5:
time.sleep(t)
t *= 2
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
print('Shared objects which still exist at manager shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
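# For every _Test* base class defined above, create one concrete TestCase per
# allowed type by mixing in the matching *Mixin class and install it in the
# calling test module; plain unittest.TestCase classes are copied unchanged.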
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
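# setUpModule() snapshots the processes/threads that are already "dangling"
# so that tearDownModule() only reports the ones leaked by these tests.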
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
time.sleep(0.5)
multiprocessing.process._cleanup()
gc.collect()
tmp = set(multiprocessing.process._dangling) - set(dangling[0])
if tmp:
print('Dangling processes:', tmp, file=sys.stderr)
del tmp
tmp = set(threading._dangling) - set(dangling[1])
if tmp:
print('Dangling threads:', tmp, file=sys.stderr)
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
"""Data store proxy for a data server."""
import base64
import functools
import os
import threading
import time
import uuid
import logging
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import utils
from grr.lib.data_stores import common
from grr.lib.rdfvalues import data_server as rdf_data_server
from grr.lib.rdfvalues import data_store as rdf_data_store
from grr.lib.rdfvalues import protodict as rdf_protodict
BASE_MAP_SUBJECT = "servers_map"
MAP_SUBJECT = "aff4:/" + BASE_MAP_SUBJECT
MAP_VALUE_PREDICATE = "metadata:value"
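# Well-known subject/predicate under which the mapping of data servers is
# stored; DataStoreService.__init__() below adds a dedicated pathing rule
# for this subject.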
def RPCWrapper(f):
"""A decorator for converting exceptions to rpc status messages.
This decorator should be inserted below the rpcserver.Handler call to prevent
normal exceptions from reaching the RPC layer. These expected exceptions are
then encoded into the status message of the response.
Args:
f: The function to wrap.
Returns:
The wrapped function, which returns a serialized DataStoreResponse.
"""
@functools.wraps(f)
def Wrapper(self, request):
"""Wrap the function can catch exceptions, converting them to status."""
failed = True
response = rdf_data_store.DataStoreResponse()
response.status = rdf_data_store.DataStoreResponse.Status.OK
try:
f(self, request, response)
failed = False
except access_control.UnauthorizedAccess as e:
# Attach a copy of the request to the response so the caller can tell why
# we failed the request.
response.Clear()
response.request = request
response.status = (rdf_data_store.DataStoreResponse.Status.
AUTHORIZATION_DENIED)
if e.subject:
response.failed_subject = utils.SmartUnicode(e.subject)
response.status_desc = utils.SmartUnicode(e)
except data_store.Error as e:
# Attach a copy of the request to the response so the caller can tell why
# we failed the request.
response.Clear()
response.request = request
response.status = rdf_data_store.DataStoreResponse.Status.DATA_STORE_ERROR
response.status_desc = utils.SmartUnicode(e)
except access_control.ExpiryError as e:
# Attach a copy of the request to the response so the caller can tell why
# we failed the request.
response.Clear()
response.request = request
response.status = rdf_data_store.DataStoreResponse.Status.TIMEOUT_ERROR
response.status_desc = utils.SmartUnicode(e)
if failed:
# Limit the size of the error report since it can be quite large.
logging.info("Failed: %s", utils.SmartStr(response)[:1000])
serialized_response = response.SerializeToString()
return serialized_response
return Wrapper
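# Illustrative usage sketch (not part of GRR; the handler name below is
# hypothetical): a method decorated with RPCWrapper receives the request and
# must fill in the response argument; the decorator serializes the response
# and converts known exceptions into its status field.
#
#   class ExampleService(object):
#
#     @RPCWrapper
#     def Ping(self, request, response):
#       del request  # Unused in this sketch.
#       response.status = rdf_data_store.DataStoreResponse.Status.OK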
class DataStoreService(object):
"""Class that responds to DataStore requests."""
def __init__(self, db):
self.db = db
self.transaction_lock = threading.Lock()
self.transactions = {}
old_pathing = config_lib.CONFIG.Get("Datastore.pathing")
# Need to add a fixed rule for the file where the server mapping is stored.
new_pathing = [r"(?P<path>" + BASE_MAP_SUBJECT + ")"] + old_pathing
self.pathing = new_pathing
self.db.RecreatePathing(self.pathing)
# Every service method must write to the response argument.
# The response will then be serialized to a string.
@RPCWrapper
def MultiSet(self, request, unused_response):
"""Set multiple attributes for a given subject at once."""
values = {}
to_delete = set()
for value in request.values:
if value.option == rdf_data_store.DataStoreValue.Option.REPLACE:
to_delete.add(value.attribute)
timestamp = self.FromTimestampSpec(request.timestamp)
if value.HasField("value"):
if value.HasField("timestamp"):
timestamp = self.FromTimestampSpec(value.timestamp)
values.setdefault(value.attribute, []).append(
(value.value.GetValue(), timestamp))
self.db.MultiSet(request.subject[0], values, to_delete=to_delete,
sync=request.sync, replace=False,
token=request.token)
@RPCWrapper
def ResolveMulti(self, request, response):
"""Resolve multiple attributes for a given subject at once."""
attribute_regex = []
for v in request.values:
attribute_regex.append(v.attribute)
timestamp = self.FromTimestampSpec(request.timestamp)
subject = request.subject[0]
values = self.db.ResolveMulti(
subject, attribute_regex, timestamp=timestamp,
limit=request.limit, token=request.token)
response.results.Append(
subject=subject,
payload=[(attribute, self._Encode(value), int(ts))
for (attribute, value, ts) in values if value])
@RPCWrapper
def MultiResolveRegex(self, request, response):
"""Resolve multiple attributes for a given subject at once."""
attribute_regex = [utils.SmartUnicode(v.attribute) for v in request.values]
timestamp = self.FromTimestampSpec(request.timestamp)
subjects = list(request.subject)
for subject, values in self.db.MultiResolveRegex(
subjects, attribute_regex, timestamp=timestamp,
token=request.token,
limit=request.limit):
response.results.Append(
subject=subject,
payload=[(utils.SmartStr(attribute), self._Encode(value), int(ts))
for (attribute, value, ts) in values])
@RPCWrapper
def DeleteAttributes(self, request, unused_response):
"""Delete attributes from a given subject."""
timestamp = self.FromTimestampSpec(request.timestamp)
subject = request.subject[0]
sync = request.sync
token = request.token
attributes = [v.attribute for v in request.values]
start, end = timestamp # pylint: disable=unpacking-non-sequence
self.db.DeleteAttributes(subject, attributes, start=start, end=end,
token=token, sync=sync)
@RPCWrapper
def DeleteSubject(self, request, unused_response):
subject = request.subject[0]
token = request.token
self.db.DeleteSubject(subject, token=token)
def _NewTransaction(self, subject, duration, response):
transid = utils.SmartStr(uuid.uuid4())
now = time.time()
self.transactions[subject] = (transid, now + duration)
self._AddTransactionId(response, subject, transid)
def _AddTransactionId(self, response, subject, transid):
blob = rdf_protodict.DataBlob(string=transid)
value = rdf_data_store.DataStoreValue(value=blob)
response.results.Append(subject=subject, values=[value])
@RPCWrapper
def LockSubject(self, request, response):
duration = self.FromTimestampSpec(request.timestamp)
if not request.subject:
# No return value.
return
subject = request.subject[0]
with self.transaction_lock:
# Check if there is a transaction.
try:
_, lease = self.transactions[subject]
if time.time() > lease:
self._NewTransaction(subject, duration, response)
else:
# Failed to get transaction.
# Do not need to do anything
pass
except KeyError:
return self._NewTransaction(subject, duration, response)
def _GetTransactionId(self, request):
return request.values[0].value.string
@RPCWrapper
def ExtendSubject(self, request, response):
duration = self.FromTimestampSpec(request.timestamp)
if not request.subject or not request.values:
# No return value.
return
subject = request.subject[0]
transid = self._GetTransactionId(request)
with self.transaction_lock:
# Check if there is a transaction.
try:
current, _ = self.transactions[subject]
if transid != current:
# Invalid transaction ID.
return
self.transactions[subject] = (transid, time.time() + duration)
# Add return value to response.
self._AddTransactionId(response, subject, transid)
except KeyError:
# Invalid transaction ID.
pass
@RPCWrapper
def UnlockSubject(self, request, response):
if not request.subject or not request.values:
return
subject = request.subject[0]
transid = self._GetTransactionId(request)
with self.transaction_lock:
# Check if there is a transaction.
try:
current, _ = self.transactions[subject]
if transid != current:
# Invalid transaction ID.
return
del self.transactions[subject]
# Add return value to response.
self._AddTransactionId(response, subject, transid)
except KeyError:
# Invalid transaction ID.
pass
def FromTimestampSpec(self, timestamp):
"""Converts constants from TimestampSpec() to the datastore ones."""
if timestamp.type == timestamp.Type.NEWEST_TIMESTAMP:
return self.db.NEWEST_TIMESTAMP
if timestamp.type == timestamp.Type.ALL_TIMESTAMPS:
return self.db.ALL_TIMESTAMPS
if timestamp.type == timestamp.Type.RANGED_TIME:
return (int(timestamp.start), int(timestamp.end))
if timestamp.type == timestamp.Type.SPECIFIC_TIME:
return int(timestamp.start)
def _Encode(self, value):
if isinstance(value, str):
return [base64.encodestring(value), 1]
return value
def Size(self):
return self.db.Size()
def LoadServerMapping(self):
"""Retrieve server mapping from database."""
token = access_control.ACLToken(username="GRRSystem").SetUID()
mapping_str, _ = self.db.Resolve(MAP_SUBJECT, MAP_VALUE_PREDICATE,
token=token)
if not mapping_str:
return None
mapping = rdf_data_server.DataServerMapping(mapping_str)
# Restore pathing information.
if self._DifferentPathing(list(mapping.pathing)):
self.pathing = list(mapping.pathing)
self.db.RecreatePathing(self.pathing)
return mapping
def _DifferentPathing(self, new_pathing):
"""Check if we have a new pathing."""
if len(new_pathing) != len(self.pathing):
return True
for i, path in enumerate(new_pathing):
if path != self.pathing[i]:
return True
return False
def SaveServerMapping(self, mapping, create_pathing=False):
"""Stores the server mapping in the data store."""
if create_pathing:
# We are going to use our own pathing.
mapping.pathing = self.pathing
else:
# We are going to use the mapping pathing configuration.
      # Check if it's different from the one we use now and then ask the
# datastore to use it.
new_pathing = list(mapping.pathing)
if self._DifferentPathing(new_pathing):
self.pathing = new_pathing
self.db.RecreatePathing(new_pathing)
token = access_control.ACLToken(username="GRRSystem").SetUID()
self.db.MultiSet(MAP_SUBJECT, {MAP_VALUE_PREDICATE: mapping}, token=token)
def GetLocation(self):
return self.db.Location()
def GetComponentInformation(self):
"""Return number of components and average size per component."""
loc = self.GetLocation()
if not os.path.exists(loc) or not os.path.isdir(loc):
return 0, 0
size, files = common.DatabaseDirectorySize(loc, self.db.FileExtension())
if files:
return files, int(float(size) / float(files))
return 0, 0
|
unknown
|
codeparrot/codeparrot-clean
| ||
import functools
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from .base import Template
from .context import Context, _builtin_context_processors
from .exceptions import TemplateDoesNotExist
from .library import import_library
class Engine:
default_builtins = [
"django.template.defaulttags",
"django.template.defaultfilters",
"django.template.loader_tags",
]
def __init__(
self,
dirs=None,
app_dirs=False,
context_processors=None,
debug=False,
loaders=None,
string_if_invalid="",
file_charset="utf-8",
libraries=None,
builtins=None,
autoescape=True,
):
if dirs is None:
dirs = []
if context_processors is None:
context_processors = []
if loaders is None:
loaders = ["django.template.loaders.filesystem.Loader"]
if app_dirs:
loaders += ["django.template.loaders.app_directories.Loader"]
loaders = [("django.template.loaders.cached.Loader", loaders)]
else:
if app_dirs:
raise ImproperlyConfigured(
"app_dirs must not be set when loaders is defined."
)
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
self.dirs = dirs
self.app_dirs = app_dirs
self.autoescape = autoescape
self.context_processors = context_processors
self.debug = debug
self.loaders = loaders
self.string_if_invalid = string_if_invalid
self.file_charset = file_charset
self.libraries = libraries
self.template_libraries = self.get_template_libraries(libraries)
self.builtins = self.default_builtins + builtins
self.template_builtins = self.get_template_builtins(self.builtins)
def __repr__(self):
return (
"<%s:%s app_dirs=%s%s debug=%s loaders=%s string_if_invalid=%s "
"file_charset=%s%s%s autoescape=%s>"
) % (
self.__class__.__qualname__,
"" if not self.dirs else " dirs=%s" % repr(self.dirs),
self.app_dirs,
(
""
if not self.context_processors
else " context_processors=%s" % repr(self.context_processors)
),
self.debug,
repr(self.loaders),
repr(self.string_if_invalid),
repr(self.file_charset),
"" if not self.libraries else " libraries=%s" % repr(self.libraries),
"" if not self.builtins else " builtins=%s" % repr(self.builtins),
repr(self.autoescape),
)
@staticmethod
@functools.lru_cache
def get_default():
"""
Return the first DjangoTemplates backend that's configured, or raise
ImproperlyConfigured if none are configured.
This is required for preserving historical APIs that rely on a
globally available, implicitly configured engine such as:
>>> from django.template import Context, Template
>>> template = Template("Hello {{ name }}!")
>>> context = Context({'name': "world"})
>>> template.render(context)
'Hello world!'
"""
# Since Engine is imported in django.template and since
# DjangoTemplates is a wrapper around this Engine class,
# local imports are required to avoid import loops.
from django.template import engines
from django.template.backends.django import DjangoTemplates
for engine in engines.all():
if isinstance(engine, DjangoTemplates):
return engine.engine
raise ImproperlyConfigured("No DjangoTemplates backend is configured.")
@cached_property
def template_context_processors(self):
context_processors = _builtin_context_processors
context_processors += tuple(self.context_processors)
return tuple(import_string(path) for path in context_processors)
def get_template_builtins(self, builtins):
return [import_library(x) for x in builtins]
def get_template_libraries(self, libraries):
loaded = {}
for name, path in libraries.items():
loaded[name] = import_library(path)
return loaded
@cached_property
def template_loaders(self):
return self.get_template_loaders(self.loaders)
def get_template_loaders(self, template_loaders):
loaders = []
for template_loader in template_loaders:
loader = self.find_template_loader(template_loader)
if loader is not None:
loaders.append(loader)
return loaders
def find_template_loader(self, loader):
if isinstance(loader, (tuple, list)):
loader, *args = loader
else:
args = []
if isinstance(loader, str):
loader_class = import_string(loader)
return loader_class(self, *args)
else:
raise ImproperlyConfigured(
"Invalid value in template loaders configuration: %r" % loader
)
def find_template(self, name, dirs=None, skip=None):
tried = []
for loader in self.template_loaders:
try:
template = loader.get_template(name, skip=skip)
return template, template.origin
except TemplateDoesNotExist as e:
tried.extend(e.tried)
raise TemplateDoesNotExist(name, tried=tried)
def from_string(self, template_code):
"""
Return a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(template_code, engine=self)
def get_template(self, template_name):
"""
Return a compiled Template object for the given template name,
handling template inheritance recursively.
"""
original_name = template_name
try:
template_name, _, partial_name = template_name.partition("#")
except AttributeError:
raise TemplateDoesNotExist(original_name)
if not template_name:
raise TemplateDoesNotExist(original_name)
template, origin = self.find_template(template_name)
if not hasattr(template, "render"):
# template needs to be compiled
template = Template(template, origin, template_name, engine=self)
if not partial_name:
return template
extra_data = getattr(template, "extra_data", {})
try:
partial = extra_data["partials"][partial_name]
except (KeyError, TypeError):
raise TemplateDoesNotExist(partial_name, tried=[template_name])
partial.engine = self
return partial
def render_to_string(self, template_name, context=None):
"""
Render the template specified by template_name with the given context.
For use in Django's test suite.
"""
if isinstance(template_name, (list, tuple)):
t = self.select_template(template_name)
else:
t = self.get_template(template_name)
# Django < 1.8 accepted a Context in `context` even though that's
# unintended. Preserve this ability but don't rewrap `context`.
if isinstance(context, Context):
return t.render(context)
else:
return t.render(Context(context, autoescape=self.autoescape))
def select_template(self, template_name_list):
"""
Given a list of template names, return the first that can be loaded.
"""
if not template_name_list:
raise TemplateDoesNotExist("No template names provided")
not_found = []
for template_name in template_name_list:
try:
return self.get_template(template_name)
except TemplateDoesNotExist as exc:
if exc.args[0] not in not_found:
not_found.append(exc.args[0])
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(", ".join(not_found))
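# Illustrative sketch (not part of Django): a standalone Engine can render a
# template string without going through settings. The "templates" directory
# name below is an assumption made only for this example.
#
#   engine = Engine(dirs=["templates"])
#   template = engine.from_string("Hello {{ name }}!")
#   print(template.render(Context({"name": "world"})))  # -> 'Hello world!'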
|
python
|
github
|
https://github.com/django/django
|
django/template/engine.py
|
"""Support for Gogogate2 garage Doors."""
import logging
from pygogogate2 import Gogogate2API as pygogogate2
import voluptuous as vol
from homeassistant.components.cover import SUPPORT_CLOSE, SUPPORT_OPEN, CoverDevice
from homeassistant.const import (
CONF_IP_ADDRESS,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
STATE_CLOSED,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "gogogate2"
NOTIFICATION_ID = "gogogate2_notification"
NOTIFICATION_TITLE = "Gogogate2 Cover Setup"
COVER_SCHEMA = vol.Schema(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
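# Illustrative configuration sketch (all values are placeholders; the keys
# follow COVER_SCHEMA above):
#
#   cover:
#     - platform: gogogate2
#       ip_address: 192.168.1.10
#       username: admin
#       password: YOUR_PASSWORD
#       name: Garage Door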
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Gogogate2 component."""
ip_address = config.get(CONF_IP_ADDRESS)
name = config.get(CONF_NAME)
password = config.get(CONF_PASSWORD)
username = config.get(CONF_USERNAME)
mygogogate2 = pygogogate2(username, password, ip_address)
try:
devices = mygogogate2.get_devices()
if devices is False:
raise ValueError("Username or Password is incorrect or no devices found")
add_entities(MyGogogate2Device(mygogogate2, door, name) for door in devices)
except (TypeError, KeyError, NameError, ValueError) as ex:
_LOGGER.error("%s", ex)
hass.components.persistent_notification.create(
"Error: {}<br />"
"You will need to restart hass after fixing."
"".format(ex),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
class MyGogogate2Device(CoverDevice):
"""Representation of a Gogogate2 cover."""
def __init__(self, mygogogate2, device, name):
"""Initialize with API object, device id."""
self.mygogogate2 = mygogogate2
self.device_id = device["door"]
self._name = name or device["name"]
self._status = device["status"]
self._available = None
@property
def name(self):
"""Return the name of the garage door if any."""
return self._name if self._name else DEFAULT_NAME
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return self._status == STATE_CLOSED
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return "garage"
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._available
def close_cover(self, **kwargs):
"""Issue close command to cover."""
self.mygogogate2.close_device(self.device_id)
def open_cover(self, **kwargs):
"""Issue open command to cover."""
self.mygogogate2.open_device(self.device_id)
def update(self):
"""Update status of cover."""
try:
self._status = self.mygogogate2.get_status(self.device_id)
self._available = True
except (TypeError, KeyError, NameError, ValueError) as ex:
_LOGGER.error("%s", ex)
self._status = None
self._available = False
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito.internal.invocation;
import java.util.LinkedList;
import java.util.List;
import java.util.stream.Collectors;
import org.mockito.internal.verification.api.InOrderContext;
import org.mockito.invocation.Invocation;
import org.mockito.invocation.Location;
import org.mockito.invocation.MatchableInvocation;
public class InvocationsFinder {
private InvocationsFinder() {}
public static List<Invocation> findInvocations(
List<Invocation> invocations, MatchableInvocation wanted) {
return invocations.stream().filter(wanted::matches).collect(Collectors.toList());
}
public static List<Invocation> findAllMatchingUnverifiedChunks(
List<Invocation> invocations,
MatchableInvocation wanted,
InOrderContext orderingContext) {
List<Invocation> unverified = removeVerifiedInOrder(invocations, orderingContext);
return unverified.stream().filter(wanted::matches).collect(Collectors.toList());
}
/**
* some examples how it works:
*
* Given invocations sequence:
* 1,1,2,1
*
* if wanted is 1 and mode is times(2) then returns
* 1,1
*
* if wanted is 1 and mode is atLeast() then returns
* 1,1,1
*
* if wanted is 1 and mode is times(x), where x != 2 then returns
* 1,1,1
*/
public static List<Invocation> findMatchingChunk(
List<Invocation> invocations,
MatchableInvocation wanted,
int wantedCount,
InOrderContext context) {
List<Invocation> unverified = removeVerifiedInOrder(invocations, context);
List<Invocation> firstChunk = getFirstMatchingChunk(wanted, unverified);
if (wantedCount != firstChunk.size()) {
return findAllMatchingUnverifiedChunks(invocations, wanted, context);
} else {
return firstChunk;
}
}
private static List<Invocation> getFirstMatchingChunk(
MatchableInvocation wanted, List<Invocation> unverified) {
List<Invocation> firstChunk = new LinkedList<>();
for (Invocation invocation : unverified) {
if (wanted.matches(invocation)) {
firstChunk.add(invocation);
} else if (!firstChunk.isEmpty()) {
break;
}
}
return firstChunk;
}
public static Invocation findFirstMatchingUnverifiedInvocation(
List<Invocation> invocations, MatchableInvocation wanted, InOrderContext context) {
for (Invocation invocation : removeVerifiedInOrder(invocations, context)) {
if (wanted.matches(invocation)) {
return invocation;
}
}
return null;
}
public static Invocation findSimilarInvocation(
List<Invocation> invocations, MatchableInvocation wanted) {
Invocation firstSimilar = null;
for (Invocation invocation : invocations) {
if (!wanted.hasSimilarMethod(invocation)) {
continue;
}
if (firstSimilar == null) {
firstSimilar = invocation;
}
if (wanted.hasSameMethod(invocation)) {
return invocation;
}
}
return firstSimilar;
}
public static Invocation findFirstUnverified(List<Invocation> invocations) {
return findFirstUnverified(invocations, null);
}
static Invocation findFirstUnverified(List<Invocation> invocations, Object mock) {
for (Invocation i : invocations) {
boolean mockIsValid = mock == null || mock == i.getMock();
if (!i.isVerified() && mockIsValid) {
return i;
}
}
return null;
}
public static Location getLastLocation(List<Invocation> invocations) {
if (invocations.isEmpty()) {
return null;
} else {
Invocation last = invocations.get(invocations.size() - 1);
return last.getLocation();
}
}
public static Invocation findPreviousVerifiedInOrder(
List<Invocation> invocations, InOrderContext context) {
List<Invocation> verifiedOnly =
invocations.stream().filter(context::isVerified).collect(Collectors.toList());
if (verifiedOnly.isEmpty()) {
return null;
} else {
return verifiedOnly.get(verifiedOnly.size() - 1);
}
}
private static List<Invocation> removeVerifiedInOrder(
List<Invocation> invocations, InOrderContext orderingContext) {
List<Invocation> unverified = new LinkedList<>();
for (Invocation i : invocations) {
if (orderingContext.isVerified(i)) {
unverified.clear();
} else {
unverified.add(i);
}
}
return unverified;
}
public static List<Location> getAllLocations(List<Invocation> invocations) {
List<Location> locations = new LinkedList<>();
for (Invocation invocation : invocations) {
locations.add(invocation.getLocation());
}
return locations;
}
/**
* i3 is unverified here:
*
* i1, i2, i3
* v
*
* all good here:
*
* i1, i2, i3
* v v
*
* @param context
* @param orderedInvocations
*/
public static Invocation findFirstUnverifiedInOrder(
InOrderContext context, List<Invocation> orderedInvocations) {
Invocation candidate = null;
for (Invocation i : orderedInvocations) {
if (!context.isVerified(i)) {
candidate = candidate != null ? candidate : i;
} else {
candidate = null;
}
}
return candidate;
}
}
|
java
|
github
|
https://github.com/mockito/mockito
|
mockito-core/src/main/java/org/mockito/internal/invocation/InvocationsFinder.java
|
from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame, Point,
RigidBody, LagrangesMethod, Particle,
kinetic_energy, dynamicsymbols, inertia,
potential_energy, Lagrangian)
from sympy import symbols, pi, sin, cos, tan, simplify, expand, Function, \
Derivative
def test_disc_on_an_incline_plane():
# Disc rolling on an inclined plane
# First the generalized coordinates are created. The mass center of the
    # disc is located from the top vertex of the inclined plane by the generalized
# coordinate 'y'. The orientation of the disc is defined by the angle
# 'theta'. The mass of the disc is 'm' and its radius is 'R'. The length of
# the inclined path is 'l', the angle of inclination is 'alpha'. 'g' is the
# gravitational constant.
y, theta = dynamicsymbols('y theta')
yd, thetad = dynamicsymbols('y theta', 1)
m, g, R, l, alpha = symbols('m g R l alpha')
# Next, we create the inertial reference frame 'N'. A reference frame 'A'
    # is attached to the inclined plane. Finally, a frame is created which is attached to the disc.
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [pi/2 - alpha, N.z])
B = A.orientnew('B', 'Axis', [-theta, A.z])
# Creating the disc 'D'; we create the point that represents the mass
# center of the disc and set its velocity. The inertia dyadic of the disc
# is created. Finally, we create the disc.
Do = Point('Do')
Do.set_vel(N, yd * A.x)
I = m * R**2 / 2 * B.z | B.z
D = RigidBody('D', Do, B, m, (I, Do))
# To construct the Lagrangian, 'L', of the disc, we determine its kinetic
# and potential energies, T and U, respectively. L is defined as the
# difference between T and U.
D.set_potential_energy(m * g * (l - y) * sin(alpha))
L = Lagrangian(N, D)
# We then create the list of generalized coordinates and constraint
# equations. The constraint arises due to the disc rolling without slip on
# on the inclined path. Also, the constraint is holonomic but we supply the
# differentiated holonomic equation as the 'LagrangesMethod' class requires
# that. We then invoke the 'LagrangesMethod' class and supply it the
    # necessary arguments and generate the equations of motion. The 'rhs' method
    # solves for the q_double_dots (i.e. the second derivatives with respect to
    # time of the generalized coordinates and the Lagrange multipliers).
q = [y, theta]
coneq = [yd - R * thetad]
m = LagrangesMethod(L, q, coneq)
m.form_lagranges_equations()
rhs = m.rhs()
rhs.simplify()
assert rhs[2] == 2*g*sin(alpha)/3
def test_simp_pen():
# This tests that the equations generated by LagrangesMethod are identical
# to those obtained by hand calculations. The system under consideration is
# the simple pendulum.
# We begin by creating the generalized coordinates as per the requirements
# of LagrangesMethod. Also we created the associate symbols
# that characterize the system: 'm' is the mass of the bob, l is the length
# of the massless rigid rod connecting the bob to a point O fixed in the
# inertial frame.
q, u = dynamicsymbols('q u')
qd, ud = dynamicsymbols('q u ', 1)
l, m, g = symbols('l m g')
# We then create the inertial frame and a frame attached to the massless
# string following which we define the inertial angular velocity of the
# string.
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q, N.z])
A.set_ang_vel(N, qd * N.z)
# Next, we create the point O and fix it in the inertial frame. We then
# locate the point P to which the bob is attached. Its corresponding
# velocity is then determined by the 'two point formula'.
O = Point('O')
O.set_vel(N, 0)
P = O.locatenew('P', l * A.x)
P.v2pt_theory(O, N, A)
# The 'Particle' which represents the bob is then created and its
# Lagrangian generated.
Pa = Particle('Pa', P, m)
Pa.set_potential_energy(- m * g * l * cos(q))
L = Lagrangian(N, Pa)
# The 'LagrangesMethod' class is invoked to obtain equations of motion.
lm = LagrangesMethod(L, [q])
lm.form_lagranges_equations()
RHS = lm.rhs()
assert RHS[1] == -g*sin(q)/l
def test_dub_pen():
# The system considered is the double pendulum. Like in the
# test of the simple pendulum above, we begin by creating the generalized
# coordinates and the simple generalized speeds and accelerations which
# will be used later. Following this we create frames and points necessary
# for the kinematics. The procedure isn't explicitly explained as this is
# similar to the simple pendulum. Also this is documented on the pydy.org
# website.
q1, q2 = dynamicsymbols('q1 q2')
q1d, q2d = dynamicsymbols('q1 q2', 1)
q1dd, q2dd = dynamicsymbols('q1 q2', 2)
u1, u2 = dynamicsymbols('u1 u2')
u1d, u2d = dynamicsymbols('u1 u2', 1)
l, m, g = symbols('l m g')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = N.orientnew('B', 'Axis', [q2, N.z])
A.set_ang_vel(N, q1d * A.z)
B.set_ang_vel(N, q2d * A.z)
O = Point('O')
P = O.locatenew('P', l * A.x)
R = P.locatenew('R', l * B.x)
O.set_vel(N, 0)
P.v2pt_theory(O, N, A)
R.v2pt_theory(P, N, B)
ParP = Particle('ParP', P, m)
ParR = Particle('ParR', R, m)
ParP.set_potential_energy(- m * g * l * cos(q1))
ParR.set_potential_energy(- m * g * l * cos(q1) - m * g * l * cos(q2))
L = Lagrangian(N, ParP, ParR)
lm = LagrangesMethod(L, [q1, q2])
lm.form_lagranges_equations()
assert simplify(l*m*(2*g*sin(q1) + l*sin(q1)*sin(q2)*q2dd
+ l*sin(q1)*cos(q2)*q2d**2 - l*sin(q2)*cos(q1)*q2d**2
+ l*cos(q1)*cos(q2)*q2dd + 2*l*q1dd) - lm.eom[0]) == 0
assert simplify(l*m*(g*sin(q2) + l*sin(q1)*sin(q2)*q1dd
- l*sin(q1)*cos(q2)*q1d**2 + l*sin(q2)*cos(q1)*q1d**2
+ l*cos(q1)*cos(q2)*q1dd + l*q2dd) - lm.eom[1]) == 0
def test_rolling_disc():
# Rolling Disc Example
# Here the rolling disc is formed from the contact point up, removing the
# need to introduce generalized speeds. Only 3 configuration and 3
    # speed variables are needed to describe this system, along with the
# disc's mass and radius, and the local gravity.
q1, q2, q3 = dynamicsymbols('q1 q2 q3')
q1d, q2d, q3d = dynamicsymbols('q1 q2 q3', 1)
r, m, g = symbols('r m g')
# The kinematics are formed by a series of simple rotations. Each simple
# rotation creates a new frame, and the next rotation is defined by the new
# frame's basis vectors. This example uses a 3-1-2 series of rotations, or
# Z, X, Y series of rotations. Angular velocity for this is defined using
# the second frame's basis (the lean frame).
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
L = Y.orientnew('L', 'Axis', [q2, Y.x])
R = L.orientnew('R', 'Axis', [q3, L.y])
# This is the translational kinematics. We create a point with no velocity
# in N; this is the contact point between the disc and ground. Next we form
# the position vector from the contact point to the disc's center of mass.
# Finally we form the velocity and acceleration of the disc.
C = Point('C')
C.set_vel(N, 0)
Dmc = C.locatenew('Dmc', r * L.z)
Dmc.v2pt_theory(C, N, R)
# Forming the inertia dyadic.
I = inertia(L, m / 4 * r**2, m / 2 * r**2, m / 4 * r**2)
BodyD = RigidBody('BodyD', Dmc, R, m, (I, Dmc))
# Finally we form the equations of motion, using the same steps we did
    # before. Supply the Lagrangian and the generalized coordinates.
BodyD.set_potential_energy(- m * g * r * cos(q2))
Lag = Lagrangian(N, BodyD)
q = [q1, q2, q3]
q1 = Function('q1')
q2 = Function('q2')
q3 = Function('q3')
l = LagrangesMethod(Lag, q)
l.form_lagranges_equations()
RHS = l.rhs()
RHS.simplify()
t = symbols('t')
assert (l.mass_matrix[3:6] == [0, 5*m*r**2/4, 0])
assert RHS[4].simplify() == (
(-8*g*sin(q2(t)) + r*(5*sin(2*q2(t))*Derivative(q1(t), t) +
12*cos(q2(t))*Derivative(q3(t), t))*Derivative(q1(t), t))/(10*r))
assert RHS[5] == (-5*cos(q2(t))*Derivative(q1(t), t) + 6*tan(q2(t)
)*Derivative(q3(t), t) + 4*Derivative(q1(t), t)/cos(q2(t))
)*Derivative(q2(t), t)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import test_servers
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class ServersIpsJsonTest(test_servers.ServersSampleBase):
extends_name = 'core_only'
sample_dir = 'server-ips'
extra_extensions_to_load = ["os-access-ips"]
# TODO(park): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def test_get(self):
# Test getting a server's IP information.
uuid = self._post_server()
response = self._do_get('servers/%s/ips' % uuid)
subs = self._get_regexes()
self._verify_response('server-ips-resp', subs, response, 200)
def test_get_by_network(self):
# Test getting a server's IP information by network id.
uuid = self._post_server()
response = self._do_get('servers/%s/ips/private' % uuid)
subs = self._get_regexes()
self._verify_response('server-ips-network-resp', subs, response, 200)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# pylint: disable=g-bad-file-header
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from src.test.py.bazel import test_base
class QueryTest(test_base.TestBase):
def testSimpleQuery(self):
self.ScratchFile('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'exports_files(["exported.txt"])',
'filegroup(name = "top-rule", srcs = [":dep-rule"])',
'filegroup(name = "dep-rule", srcs = ["src.txt"])',
])
self.ScratchFile('foo/src.txt')
self.ScratchFile('foo/exported.txt')
self.ScratchFile('foo/non-exported.txt')
self._AssertQueryOutput('//foo:top-rule', '//foo:top-rule')
self._AssertQueryOutput('//foo:*', '//foo:top-rule', '//foo:dep-rule',
'//foo:src.txt', '//foo:exported.txt',
'//foo:BUILD')
self._AssertQueryOutput('deps(//foo:top-rule)', '//foo:top-rule',
'//foo:dep-rule', '//foo:src.txt')
self._AssertQueryOutput('deps(//foo:top-rule, 1)', '//foo:top-rule',
'//foo:dep-rule')
def _AssertQueryOutput(self, query_expr, *expected_results):
exit_code, stdout, stderr = self.RunBazel(['query', query_expr])
self.AssertExitCode(exit_code, 0, stderr)
stdout = sorted(x for x in stdout if x)
self.assertEqual(len(stdout), len(expected_results))
self.assertListEqual(stdout, sorted(expected_results))
if __name__ == '__main__':
unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
class test_conjugate (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_000 (self):
src_data = (-2-2j, -1-1j, -2+2j, -1+1j,
2-2j, 1-1j, 2+2j, 1+1j,
0+0j)
exp_data = (-2+2j, -1+1j, -2-2j, -1-1j,
2+2j, 1+1j, 2-2j, 1-1j,
0-0j)
src = blocks.vector_source_c(src_data)
op = blocks.conjugate_cc ()
dst = blocks.vector_sink_c ()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
if __name__ == '__main__':
gr_unittest.run(test_conjugate, "test_conjugate.xml")
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Various Windows specific bits and pieces
"""
import sys
if sys.platform != 'win32': # pragma: no cover
raise ImportError('win32 only')
import socket
import itertools
import msvcrt
import os
import subprocess
import tempfile
import _winapi
__all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle']
# Constants/globals
BUFSIZE = 8192
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
_mmap_counter = itertools.count()
# Replacement for socket.socketpair()
def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
"""A socket pair usable as a self-pipe, for Windows.
Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
"""
if family == socket.AF_INET:
host = '127.0.0.1'
elif family == socket.AF_INET6:
host = '::1'
else:
raise ValueError("Ony AF_INET and AF_INET6 socket address families "
"are supported")
if type != socket.SOCK_STREAM:
raise ValueError("Only SOCK_STREAM socket type is supported")
if proto != 0:
raise ValueError("Only protocol zero is supported")
# We create a connected TCP socket. Note the trick with setblocking(0)
# that prevents us from having to create a thread.
lsock = socket.socket(family, type, proto)
try:
lsock.bind((host, 0))
lsock.listen(1)
# On IPv6, ignore flow_info and scope_id
addr, port = lsock.getsockname()[:2]
csock = socket.socket(family, type, proto)
try:
csock.setblocking(False)
try:
csock.connect((addr, port))
except (BlockingIOError, InterruptedError):
pass
ssock, _ = lsock.accept()
csock.setblocking(True)
except:
csock.close()
raise
finally:
lsock.close()
return (ssock, csock)
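# Illustrative sketch (not part of this module): the returned pair behaves as
# a self-pipe, so a byte written to one end wakes a reader on the other.
#
#   r, w = socketpair()
#   w.send(b"\0")              # signal
#   assert r.recv(1) == b"\0"  # wait for the signal
#   r.close()
#   w.close()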
# Replacement for os.pipe() using handles instead of fds
def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
"""Like os.pipe() but with overlapped support and using handles not fds."""
address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' %
(os.getpid(), next(_mmap_counter)))
if duplex:
openmode = _winapi.PIPE_ACCESS_DUPLEX
access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
obsize, ibsize = bufsize, bufsize
else:
openmode = _winapi.PIPE_ACCESS_INBOUND
access = _winapi.GENERIC_WRITE
obsize, ibsize = 0, bufsize
openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
if overlapped[0]:
openmode |= _winapi.FILE_FLAG_OVERLAPPED
if overlapped[1]:
flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
else:
flags_and_attribs = 0
h1 = h2 = None
try:
h1 = _winapi.CreateNamedPipe(
address, openmode, _winapi.PIPE_WAIT,
1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
h2 = _winapi.CreateFile(
address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
flags_and_attribs, _winapi.NULL)
ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
ov.GetOverlappedResult(True)
return h1, h2
except:
if h1 is not None:
_winapi.CloseHandle(h1)
if h2 is not None:
_winapi.CloseHandle(h2)
raise
# Wrapper for a pipe handle
class PipeHandle:
"""Wrapper for an overlapped pipe handle which is vaguely file-object like.
The IOCP event loop can use these instead of socket objects.
"""
def __init__(self, handle):
self._handle = handle
@property
def handle(self):
return self._handle
def fileno(self):
return self._handle
def close(self, *, CloseHandle=_winapi.CloseHandle):
if self._handle != -1:
CloseHandle(self._handle)
self._handle = -1
__del__ = close
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
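# Illustrative sketch (not part of asyncio): with the default arguments,
# pipe() returns the read handle first and the write handle second; wrapping
# both in PipeHandle gives deterministic cleanup via the context manager.
#
#   read_h, write_h = pipe(overlapped=(True, True))
#   with PipeHandle(read_h) as rp, PipeHandle(write_h) as wp:
#       ...  # hand rp/wp to an IOCP-based event loop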
# Replacement for subprocess.Popen using overlapped pipe handles
class Popen(subprocess.Popen):
"""Replacement for subprocess.Popen using overlapped pipe handles.
The stdin, stdout, stderr are None or instances of PipeHandle.
"""
def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds):
assert not kwds.get('universal_newlines')
assert kwds.get('bufsize', 0) == 0
stdin_rfd = stdout_wfd = stderr_wfd = None
stdin_wh = stdout_rh = stderr_rh = None
if stdin == PIPE:
stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True)
stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY)
else:
stdin_rfd = stdin
if stdout == PIPE:
stdout_rh, stdout_wh = pipe(overlapped=(True, False))
stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0)
else:
stdout_wfd = stdout
if stderr == PIPE:
stderr_rh, stderr_wh = pipe(overlapped=(True, False))
stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0)
elif stderr == STDOUT:
stderr_wfd = stdout_wfd
else:
stderr_wfd = stderr
try:
super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
stderr=stderr_wfd, **kwds)
except:
for h in (stdin_wh, stdout_rh, stderr_rh):
if h is not None:
_winapi.CloseHandle(h)
raise
else:
if stdin_wh is not None:
self.stdin = PipeHandle(stdin_wh)
if stdout_rh is not None:
self.stdout = PipeHandle(stdout_rh)
if stderr_rh is not None:
self.stderr = PipeHandle(stderr_rh)
finally:
if stdin == PIPE:
os.close(stdin_rfd)
if stdout == PIPE:
os.close(stdout_wfd)
if stderr == PIPE:
os.close(stderr_wfd)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os
import stat
import sys
import unittest
from contextlib import closing
from functools import partial
from pathlib import Path
from test.support import import_helper, os_helper
dbm_sqlite3 = import_helper.import_module("dbm.sqlite3")
# N.B. The test will fail on some platforms without sqlite3
# if the sqlite3 import is above the import of dbm.sqlite3.
# This is deliberate: if the import helper managed to import dbm.sqlite3,
# we must inevitably be able to import sqlite3. Else, we have a problem.
import sqlite3
from dbm.sqlite3 import _normalize_uri
root_in_posix = False
if hasattr(os, 'geteuid'):
root_in_posix = (os.geteuid() == 0)
class _SQLiteDbmTests(unittest.TestCase):
def setUp(self):
self.filename = os_helper.TESTFN
db = dbm_sqlite3.open(self.filename, "c")
db.close()
def tearDown(self):
for suffix in "", "-wal", "-shm":
os_helper.unlink(self.filename + suffix)
class URI(unittest.TestCase):
def test_uri_substitutions(self):
dataset = (
("/absolute/////b/c", "/absolute/b/c"),
("PRE#MID##END", "PRE%23MID%23%23END"),
("%#?%%#", "%25%23%3F%25%25%23"),
)
for path, normalized in dataset:
with self.subTest(path=path, normalized=normalized):
self.assertEndsWith(_normalize_uri(path), normalized)
@unittest.skipUnless(sys.platform == "win32", "requires Windows")
def test_uri_windows(self):
dataset = (
# Relative subdir.
(r"2018\January.xlsx",
"2018/January.xlsx"),
# Absolute with drive letter.
(r"C:\Projects\apilibrary\apilibrary.sln",
"/C:/Projects/apilibrary/apilibrary.sln"),
# Relative with drive letter.
(r"C:Projects\apilibrary\apilibrary.sln",
"/C:Projects/apilibrary/apilibrary.sln"),
)
for path, normalized in dataset:
with self.subTest(path=path, normalized=normalized):
if not Path(path).is_absolute():
self.skipTest(f"skipping relative path: {path!r}")
self.assertEndsWith(_normalize_uri(path), normalized)
class ReadOnly(_SQLiteDbmTests):
def setUp(self):
super().setUp()
with dbm_sqlite3.open(self.filename, "w") as db:
db[b"key1"] = "value1"
db[b"key2"] = "value2"
self.db = dbm_sqlite3.open(self.filename, "r")
def tearDown(self):
self.db.close()
super().tearDown()
def test_readonly_read(self):
self.assertEqual(self.db[b"key1"], b"value1")
self.assertEqual(self.db[b"key2"], b"value2")
def test_readonly_write(self):
with self.assertRaises(dbm_sqlite3.error):
self.db[b"new"] = "value"
def test_readonly_delete(self):
with self.assertRaises(dbm_sqlite3.error):
del self.db[b"key1"]
def test_readonly_keys(self):
self.assertEqual(self.db.keys(), [b"key1", b"key2"])
def test_readonly_iter(self):
self.assertEqual([k for k in self.db], [b"key1", b"key2"])
@unittest.skipIf(root_in_posix, "test is meaningless with root privilege")
class ReadOnlyFilesystem(unittest.TestCase):
def setUp(self):
self.test_dir = os_helper.TESTFN
self.addCleanup(os_helper.rmtree, self.test_dir)
os.mkdir(self.test_dir)
self.db_path = os.path.join(self.test_dir, "test.db")
db = dbm_sqlite3.open(self.db_path, "c")
db[b"key"] = b"value"
db.close()
def test_readonly_file_read(self):
os.chmod(self.db_path, stat.S_IREAD)
with dbm_sqlite3.open(self.db_path, "r") as db:
self.assertEqual(db[b"key"], b"value")
def test_readonly_file_write(self):
os.chmod(self.db_path, stat.S_IREAD)
with dbm_sqlite3.open(self.db_path, "w") as db:
with self.assertRaises(dbm_sqlite3.error):
db[b"newkey"] = b"newvalue"
def test_readonly_dir_read(self):
os.chmod(self.test_dir, stat.S_IREAD | stat.S_IEXEC)
with dbm_sqlite3.open(self.db_path, "r") as db:
self.assertEqual(db[b"key"], b"value")
def test_readonly_dir_write(self):
os.chmod(self.test_dir, stat.S_IREAD | stat.S_IEXEC)
with dbm_sqlite3.open(self.db_path, "w") as db:
try:
db[b"newkey"] = b"newvalue"
modified = True # on Windows and macOS
except dbm_sqlite3.error:
modified = False
with dbm_sqlite3.open(self.db_path, "r") as db:
if modified:
self.assertEqual(db[b"newkey"], b"newvalue")
else:
self.assertNotIn(b"newkey", db)
class ReadWrite(_SQLiteDbmTests):
def setUp(self):
super().setUp()
self.db = dbm_sqlite3.open(self.filename, "w")
def tearDown(self):
self.db.close()
super().tearDown()
def db_content(self):
with closing(sqlite3.connect(self.filename)) as cx:
keys = [r[0] for r in cx.execute("SELECT key FROM Dict")]
vals = [r[0] for r in cx.execute("SELECT value FROM Dict")]
return keys, vals
def test_readwrite_unique_key(self):
self.db["key"] = "value"
self.db["key"] = "other"
keys, vals = self.db_content()
self.assertEqual(keys, [b"key"])
self.assertEqual(vals, [b"other"])
def test_readwrite_delete(self):
self.db["key"] = "value"
self.db["new"] = "other"
del self.db[b"new"]
keys, vals = self.db_content()
self.assertEqual(keys, [b"key"])
self.assertEqual(vals, [b"value"])
del self.db[b"key"]
keys, vals = self.db_content()
self.assertEqual(keys, [])
self.assertEqual(vals, [])
def test_readwrite_null_key(self):
with self.assertRaises(dbm_sqlite3.error):
self.db[None] = "value"
def test_readwrite_null_value(self):
with self.assertRaises(dbm_sqlite3.error):
self.db[b"key"] = None
class Misuse(_SQLiteDbmTests):
def setUp(self):
super().setUp()
self.db = dbm_sqlite3.open(self.filename, "w")
def tearDown(self):
self.db.close()
super().tearDown()
def test_misuse_double_create(self):
self.db["key"] = "value"
with dbm_sqlite3.open(self.filename, "c") as db:
self.assertEqual(db[b"key"], b"value")
def test_misuse_double_close(self):
self.db.close()
def test_misuse_invalid_flag(self):
regex = "must be.*'r'.*'w'.*'c'.*'n', not 'invalid'"
with self.assertRaisesRegex(ValueError, regex):
dbm_sqlite3.open(self.filename, flag="invalid")
def test_misuse_double_delete(self):
self.db["key"] = "value"
del self.db[b"key"]
with self.assertRaises(KeyError):
del self.db[b"key"]
def test_misuse_invalid_key(self):
with self.assertRaises(KeyError):
self.db[b"key"]
def test_misuse_iter_close1(self):
self.db["1"] = 1
it = iter(self.db)
self.db.close()
with self.assertRaises(dbm_sqlite3.error):
next(it)
def test_misuse_iter_close2(self):
self.db["1"] = 1
self.db["2"] = 2
it = iter(self.db)
next(it)
self.db.close()
with self.assertRaises(dbm_sqlite3.error):
next(it)
def test_misuse_use_after_close(self):
self.db.close()
with self.assertRaises(dbm_sqlite3.error):
self.db[b"read"]
with self.assertRaises(dbm_sqlite3.error):
self.db[b"write"] = "value"
with self.assertRaises(dbm_sqlite3.error):
del self.db[b"del"]
with self.assertRaises(dbm_sqlite3.error):
len(self.db)
with self.assertRaises(dbm_sqlite3.error):
self.db.keys()
def test_misuse_reinit(self):
with self.assertRaises(dbm_sqlite3.error):
self.db.__init__("new.db", flag="n", mode=0o666)
def test_misuse_empty_filename(self):
for flag in "r", "w", "c", "n":
with self.assertRaises(dbm_sqlite3.error):
db = dbm_sqlite3.open("", flag="c")
class DataTypes(_SQLiteDbmTests):
dataset = (
# (raw, coerced)
(42, b"42"),
(3.14, b"3.14"),
("string", b"string"),
(b"bytes", b"bytes"),
)
def setUp(self):
super().setUp()
self.db = dbm_sqlite3.open(self.filename, "w")
def tearDown(self):
self.db.close()
super().tearDown()
def test_datatypes_values(self):
for raw, coerced in self.dataset:
with self.subTest(raw=raw, coerced=coerced):
self.db["key"] = raw
self.assertEqual(self.db[b"key"], coerced)
def test_datatypes_keys(self):
for raw, coerced in self.dataset:
with self.subTest(raw=raw, coerced=coerced):
self.db[raw] = "value"
self.assertEqual(self.db[coerced], b"value")
# Raw keys are silently coerced to bytes.
self.assertEqual(self.db[raw], b"value")
del self.db[raw]
def test_datatypes_replace_coerced(self):
self.db["10"] = "value"
self.db[b"10"] = "value"
self.db[10] = "value"
self.assertEqual(self.db.keys(), [b"10"])
class CorruptDatabase(_SQLiteDbmTests):
"""Verify that database exceptions are raised as dbm.sqlite3.error."""
def setUp(self):
super().setUp()
with closing(sqlite3.connect(self.filename)) as cx:
with cx:
cx.execute("DROP TABLE IF EXISTS Dict")
cx.execute("CREATE TABLE Dict (invalid_schema)")
def check(self, flag, fn, should_succeed=False):
with closing(dbm_sqlite3.open(self.filename, flag)) as db:
with self.assertRaises(dbm_sqlite3.error):
fn(db)
@staticmethod
def read(db):
return db["key"]
@staticmethod
def write(db):
db["key"] = "value"
@staticmethod
def iter(db):
next(iter(db))
@staticmethod
def keys(db):
db.keys()
@staticmethod
def del_(db):
del db["key"]
@staticmethod
def len_(db):
len(db)
def test_corrupt_readwrite(self):
for flag in "r", "w", "c":
with self.subTest(flag=flag):
check = partial(self.check, flag=flag)
check(fn=self.read)
check(fn=self.write)
check(fn=self.iter)
check(fn=self.keys)
check(fn=self.del_)
check(fn=self.len_)
def test_corrupt_force_new(self):
with closing(dbm_sqlite3.open(self.filename, "n")) as db:
db["foo"] = "write"
_ = db[b"foo"]
next(iter(db))
del db[b"foo"]
if __name__ == "__main__":
unittest.main()
|
python
|
github
|
https://github.com/python/cpython
|
Lib/test/test_dbm_sqlite3.py
|
'use strict';
const common = require('../common');
const fs = require('fs');
const assert = require('assert');
const tmpdir = require('../../test/common/tmpdir');
const bench = common.createBenchmark(main, {
type: ['valid-string', 'valid-buffer', 'invalid'],
n: [1e4],
});
function main({ n, type }) {
tmpdir.refresh();
const options = { encoding: 'utf8' };
let prefix;
let out = true;
switch (type) {
case 'valid-string':
prefix = tmpdir.resolve(`${Date.now()}`);
break;
case 'valid-buffer':
prefix = Buffer.from(tmpdir.resolve(`${Date.now()}`));
break;
case 'invalid':
prefix = tmpdir.resolve('non-existent', 'foo', 'bar');
break;
default:
      throw new Error('Invalid type');
}
bench.start();
for (let i = 0; i < n; i++) {
try {
out = fs.mkdtempSync(prefix, options);
} catch {
// do nothing
}
}
bench.end(n);
assert.ok(out);
}
|
javascript
|
github
|
https://github.com/nodejs/node
|
benchmark/fs/bench-mkdtempSync.js
|
import utils from '../utils.js';
import platform from '../platform/index.js';
export default platform.hasStandardBrowserEnv ?
// Standard browser envs support document.cookie
{
write(name, value, expires, path, domain, secure, sameSite) {
if (typeof document === 'undefined') return;
const cookie = [`${name}=${encodeURIComponent(value)}`];
if (utils.isNumber(expires)) {
cookie.push(`expires=${new Date(expires).toUTCString()}`);
}
if (utils.isString(path)) {
cookie.push(`path=${path}`);
}
if (utils.isString(domain)) {
cookie.push(`domain=${domain}`);
}
if (secure === true) {
cookie.push('secure');
}
if (utils.isString(sameSite)) {
cookie.push(`SameSite=${sameSite}`);
}
document.cookie = cookie.join('; ');
},
read(name) {
if (typeof document === 'undefined') return null;
const match = document.cookie.match(new RegExp('(?:^|; )' + name + '=([^;]*)'));
return match ? decodeURIComponent(match[1]) : null;
},
remove(name) {
this.write(name, '', Date.now() - 86400000, '/');
}
}
:
  // Non-standard browser envs (web workers, react-native) lack needed support.
{
write() {},
read() {
return null;
},
remove() {}
};
|
javascript
|
github
|
https://github.com/axios/axios
|
lib/helpers/cookies.js
|
from Firefly import logging
from Firefly.automation import Automation
from Firefly.helpers.events import Command
from Firefly.const import AUTHOR
# TODO: move this to automation
from Firefly.util.conditions import check_conditions
TITLE = 'Time Based Actions'
COMMANDS = ['ADD_ACTION']
def Setup(firefly, package, **kwargs):
"""
Args:
firefly:
package:
kwargs:
"""
  logging.message('Entering %s setup routine' % TITLE)
tba = TimeBasedAction(firefly, package, **kwargs)
# TODO: Replace this with a new firefly.add_device() function
firefly.components[tba.id] = tba
class TimeBasedAction(Automation):
"""
"""
def __init__(self, firefly, package, **kwargs):
"""
Args:
firefly:
package:
kwargs:
"""
super().__init__(firefly, package, TITLE, AUTHOR, self.event_handler, **kwargs)
self.add_command('ADD_ACTION', self.add_action)
# actions should be in the form {'command': <COMMAND DICT>, 'conditions': <CONDITIONS DICT>}
def event_handler(self, event, **kwargs):
"""
Args:
event:
kwargs:
Returns:
"""
r = True
for a in self.actions:
a.execute_action(self._firefly)
return r
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DocsService extends the GDataService to streamline Google Documents
operations.
DocsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DocumentQuery: Queries a Google Document list feed.
DocumentAclQuery: Queries a Google Document Acl feed.
"""
__author__ = ('api.jfisher (Jeff Fisher), '
'e.bidelman (Eric Bidelman)')
import re
import atom
import gdata.service
import gdata.docs
import urllib.request, urllib.parse, urllib.error
# XML Namespaces used in Google Documents entities.
DATA_KIND_SCHEME = gdata.GDATA_NAMESPACE + '#kind'
DOCUMENT_LABEL = 'document'
SPREADSHEET_LABEL = 'spreadsheet'
PRESENTATION_LABEL = 'presentation'
FOLDER_LABEL = 'folder'
PDF_LABEL = 'pdf'
LABEL_SCHEME = gdata.GDATA_NAMESPACE + '/labels'
STARRED_LABEL_TERM = LABEL_SCHEME + '#starred'
TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed'
HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden'
MINE_LABEL_TERM = LABEL_SCHEME + '#mine'
PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private'
SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain'
VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed'
FOLDERS_SCHEME_PREFIX = gdata.docs.DOCUMENTS_NAMESPACE + '/folders/'
# File extensions of documents that are permitted to be uploaded or downloaded.
SUPPORTED_FILETYPES = {
'CSV': 'text/csv',
'TSV': 'text/tab-separated-values',
'TAB': 'text/tab-separated-values',
'DOC': 'application/msword',
'DOCX': ('application/vnd.openxmlformats-officedocument.'
'wordprocessingml.document'),
'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet',
'ODT': 'application/vnd.oasis.opendocument.text',
'RTF': 'application/rtf',
'SXW': 'application/vnd.sun.xml.writer',
'TXT': 'text/plain',
'XLS': 'application/vnd.ms-excel',
'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'PDF': 'application/pdf',
'PNG': 'image/png',
'PPT': 'application/vnd.ms-powerpoint',
'PPS': 'application/vnd.ms-powerpoint',
'HTM': 'text/html',
'HTML': 'text/html',
'ZIP': 'application/zip',
'SWF': 'application/x-shockwave-flash'
}
class DocsService(gdata.service.GDataService):
"""Client extension for the Google Documents service Document List feed."""
  __FILE_EXT_PATTERN = re.compile(r'.*\.([a-zA-Z]{3,}$)')
  __RESOURCE_ID_PATTERN = re.compile(r'^([a-z]*)(:|%3A)([\w-]*)$')
def __init__(self, email=None, password=None, source=None,
server='docs.google.com', additional_headers=None, **kwargs):
"""Creates a client for the Google Documents service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'docs.google.com'.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(
self, email=email, password=password, service='writely', source=source,
server=server, additional_headers=additional_headers, **kwargs)
self.ssl = True
def _MakeKindCategory(self, label):
if label is None:
return None
return atom.Category(scheme=DATA_KIND_SCHEME,
term=gdata.docs.DOCUMENTS_NAMESPACE + '#' + label, label=label)
def _MakeContentLinkFromId(self, resource_id):
match = self.__RESOURCE_ID_PATTERN.match(resource_id)
label = match.group(1)
doc_id = match.group(3)
if label == DOCUMENT_LABEL:
return '/feeds/download/documents/Export?docId=%s' % doc_id
if label == PRESENTATION_LABEL:
return '/feeds/download/presentations/Export?docId=%s' % doc_id
if label == SPREADSHEET_LABEL:
return ('https://spreadsheets.google.com/feeds/download/spreadsheets/'
'Export?key=%s' % doc_id)
raise ValueError('Invalid resource id: %s' % resource_id)
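  # Illustrative example (the id below is hypothetical, not part of the
  # library):
  #
  #   client._MakeContentLinkFromId('document:abc123')
  #   # -> '/feeds/download/documents/Export?docId=abc123'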
def _UploadFile(self, media_source, title, category, folder_or_uri=None):
"""Uploads a file to the Document List feed.
Args:
media_source: A gdata.MediaSource object containing the file to be
uploaded.
title: string The title of the document on the server after being
uploaded.
category: An atom.Category object specifying the appropriate document
type.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the document created on
the Google Documents service.
"""
if folder_or_uri:
try:
uri = folder_or_uri.content.src
except AttributeError:
uri = folder_or_uri
else:
uri = '/feeds/documents/private/full'
entry = gdata.docs.DocumentListEntry()
entry.title = atom.Title(text=title)
if category is not None:
entry.category.append(category)
entry = self.Post(entry, uri, media_source=media_source,
extra_headers={'Slug': media_source.file_name},
converter=gdata.docs.DocumentListEntryFromString)
return entry
def _DownloadFile(self, uri, file_path):
"""Downloads a file.
Args:
uri: string The full Export URL to download the file from.
file_path: string The full path to save the file to.
Raises:
RequestError: on error response from server.
"""
server_response = self.request('GET', uri)
response_body = server_response.read()
timeout = 5
while server_response.status == 302 and timeout > 0:
server_response = self.request('GET',
server_response.getheader('Location'))
response_body = server_response.read()
timeout -= 1
if server_response.status != 200:
raise gdata.service.RequestError({'status': server_response.status,
'reason': server_response.reason,
'body': response_body})
f = open(file_path, 'wb')
f.write(response_body)
f.flush()
f.close()
def MoveIntoFolder(self, source_entry, folder_entry):
"""Moves a document into a folder in the Document List Feed.
Args:
source_entry: DocumentListEntry An object representing the source
document/folder.
folder_entry: DocumentListEntry An object with a link to the destination
folder.
Returns:
A DocumentListEntry containing information about the document created on
the Google Documents service.
"""
entry = gdata.docs.DocumentListEntry()
entry.id = source_entry.id
entry = self.Post(entry, folder_entry.content.src,
converter=gdata.docs.DocumentListEntryFromString)
return entry
def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString):
"""Queries the Document List feed and returns the resulting feed of
entries.
Args:
uri: string The full URI to be queried. This can contain query
parameters, a hostname, or simply the relative path to a Document
List feed. The DocumentQuery object is useful when constructing
query parameters.
converter: func (optional) A function which will be executed on the
retrieved item, generally to render it into a Python object.
By default the DocumentListFeedFromString function is used to
return a DocumentListFeed object. This is because most feed
queries will result in a feed and not a single entry.
"""
return self.Get(uri, converter=converter)
def QueryDocumentListFeed(self, uri):
"""Retrieves a DocumentListFeed by retrieving a URI based off the Document
List feed, including any query parameters. A DocumentQuery object can
be used to construct these parameters.
Args:
uri: string The URI of the feed being retrieved possibly with query
parameters.
Returns:
A DocumentListFeed object representing the feed returned by the server.
"""
return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString)
def GetDocumentListEntry(self, uri):
"""Retrieves a particular DocumentListEntry by its unique URI.
Args:
uri: string The unique URI of an entry in a Document List feed.
Returns:
A DocumentListEntry object representing the retrieved entry.
"""
return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString)
def GetDocumentListFeed(self, uri=None):
"""Retrieves a feed containing all of a user's documents.
Args:
uri: string A full URI to query the Document List feed.
"""
if not uri:
uri = gdata.docs.service.DocumentQuery().ToUri()
return self.QueryDocumentListFeed(uri)
def GetDocumentListAclEntry(self, uri):
"""Retrieves a particular DocumentListAclEntry by its unique URI.
Args:
uri: string The unique URI of an entry in a Document List feed.
Returns:
A DocumentListAclEntry object representing the retrieved entry.
"""
return self.Get(uri, converter=gdata.docs.DocumentListAclEntryFromString)
def GetDocumentListAclFeed(self, uri):
"""Retrieves a feed containing all of a user's documents.
Args:
uri: string The URI of a document's Acl feed to retrieve.
Returns:
A DocumentListAclFeed object representing the ACL feed
returned by the server.
"""
return self.Get(uri, converter=gdata.docs.DocumentListAclFeedFromString)
def Upload(self, media_source, title, folder_or_uri=None, label=None):
"""Uploads a document inside of a MediaSource object to the Document List
feed with the given title.
Args:
media_source: MediaSource The gdata.MediaSource object containing a
document file to be uploaded.
title: string The title of the document on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
label: optional label describing the type of the document to be created.
Returns:
A DocumentListEntry containing information about the document created
on the Google Documents service.
"""
return self._UploadFile(media_source, title, self._MakeKindCategory(label),
folder_or_uri)
def Download(self, entry_or_id_or_url, file_path, export_format=None,
gid=None, extra_params=None):
"""Downloads a document from the Document List.
Args:
entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
or a url to download from (such as the content src).
file_path: string The full path to save the file to.
export_format: the format to convert to, if conversion is required.
gid: grid id, for downloading a single grid of a spreadsheet
extra_params: a map of any further parameters to control how the document
is downloaded
Raises:
RequestError if the service does not respond with success
"""
if isinstance(entry_or_id_or_url, gdata.docs.DocumentListEntry):
url = entry_or_id_or_url.content.src
else:
if self.__RESOURCE_ID_PATTERN.match(entry_or_id_or_url):
url = self._MakeContentLinkFromId(entry_or_id_or_url)
else:
url = entry_or_id_or_url
if export_format is not None:
if url.find('/Export?') == -1:
raise gdata.service.Error('This entry cannot be exported '
'as a different format')
url += '&exportFormat=%s' % export_format
if gid is not None:
if url.find('spreadsheets') == -1:
raise gdata.service.Error('grid id param is not valid for this entry')
url += '&gid=%s' % gid
if extra_params:
url += '&' + urllib.parse.urlencode(extra_params)
self._DownloadFile(url, file_path)
def Export(self, entry_or_id_or_url, file_path, gid=None, extra_params=None):
"""Downloads a document from the Document List in a different format.
Args:
entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
or a url to download from (such as the content src).
file_path: string The full path to save the file to. The export
format is inferred from the file extension.
gid: grid id, for downloading a single grid of a spreadsheet
extra_params: a map of any further parameters to control how the document
is downloaded
Raises:
RequestError if the service does not respond with success
"""
ext = None
match = self.__FILE_EXT_PATTERN.match(file_path)
if match:
ext = match.group(1)
self.Download(entry_or_id_or_url, file_path, ext, gid, extra_params)
def CreateFolder(self, title, folder_or_uri=None):
"""Creates a folder in the Document List feed.
Args:
title: string The title of the folder on the server after being created.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the folder created on
the Google Documents service.
"""
if folder_or_uri:
try:
uri = folder_or_uri.content.src
except AttributeError:
uri = folder_or_uri
else:
uri = '/feeds/documents/private/full'
folder_entry = gdata.docs.DocumentListEntry()
folder_entry.title = atom.Title(text=title)
folder_entry.category.append(self._MakeKindCategory(FOLDER_LABEL))
folder_entry = self.Post(folder_entry, uri,
converter=gdata.docs.DocumentListEntryFromString)
return folder_entry
def MoveOutOfFolder(self, source_entry):
"""Moves a document into a folder in the Document List Feed.
Args:
source_entry: DocumentListEntry An object representing the source
document/folder.
Returns:
True if the entry was moved out.
"""
return self.Delete(source_entry.GetEditLink().href)
# Deprecated methods
#@atom.deprecated('Please use Upload instead')
def UploadPresentation(self, media_source, title, folder_or_uri=None):
"""Uploads a presentation inside of a MediaSource object to the Document
List feed with the given title.
This method is deprecated, use Upload instead.
Args:
media_source: MediaSource The MediaSource object containing a
presentation file to be uploaded.
title: string The title of the presentation on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the presentation created
on the Google Documents service.
"""
return self._UploadFile(
media_source, title, self._MakeKindCategory(PRESENTATION_LABEL),
folder_or_uri=folder_or_uri)
UploadPresentation = atom.deprecated('Please use Upload instead')(
UploadPresentation)
#@atom.deprecated('Please use Upload instead')
def UploadSpreadsheet(self, media_source, title, folder_or_uri=None):
"""Uploads a spreadsheet inside of a MediaSource object to the Document
List feed with the given title.
This method is deprecated, use Upload instead.
Args:
media_source: MediaSource The MediaSource object containing a spreadsheet
file to be uploaded.
title: string The title of the spreadsheet on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the spreadsheet created
on the Google Documents service.
"""
return self._UploadFile(
media_source, title, self._MakeKindCategory(SPREADSHEET_LABEL),
folder_or_uri=folder_or_uri)
UploadSpreadsheet = atom.deprecated('Please use Upload instead')(
UploadSpreadsheet)
#@atom.deprecated('Please use Upload instead')
def UploadDocument(self, media_source, title, folder_or_uri=None):
"""Uploads a document inside of a MediaSource object to the Document List
feed with the given title.
This method is deprecated, use Upload instead.
Args:
media_source: MediaSource The gdata.MediaSource object containing a
document file to be uploaded.
title: string The title of the document on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the document created
on the Google Documents service.
"""
return self._UploadFile(
media_source, title, self._MakeKindCategory(DOCUMENT_LABEL),
folder_or_uri=folder_or_uri)
UploadDocument = atom.deprecated('Please use Upload instead')(
UploadDocument)
"""Calling any of these functions is the same as calling Export"""
DownloadDocument = atom.deprecated('Please use Export instead')(Export)
DownloadPresentation = atom.deprecated('Please use Export instead')(Export)
DownloadSpreadsheet = atom.deprecated('Please use Export instead')(Export)
"""Calling any of these functions is the same as calling MoveIntoFolder"""
MoveDocumentIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MovePresentationIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MoveSpreadsheetIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MoveFolderIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
class DocumentQuery(gdata.service.Query):
"""Object used to construct a URI to query the Google Document List feed"""
def __init__(self, feed='/feeds/documents', visibility='private',
projection='full', text_query=None, params=None,
categories=None):
"""Constructor for Document List Query
Args:
feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
visibility: string (optional) The visibility chosen for the current feed.
projection: string (optional) The projection chosen for the current feed.
text_query: string (optional) The contents of the q query parameter. This
string is URL escaped upon conversion to a URI.
params: dict (optional) Parameter value string pairs which become URL
params when translated to a URI. These parameters are added to
the query's items.
categories: list (optional) List of category strings which should be
included as query categories. See gdata.service.Query for
additional documentation.
Yields:
A DocumentQuery object used to construct a URI based on the Document
List feed.
"""
self.visibility = visibility
self.projection = projection
gdata.service.Query.__init__(self, feed, text_query, params, categories)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Document
List feed.
"""
old_feed = self.feed
self.feed = '/'.join([old_feed, self.visibility, self.projection])
new_feed = gdata.service.Query.ToUri(self)
self.feed = old_feed
return new_feed
def AddNamedFolder(self, email, folder_name):
"""Adds a named folder category, qualified by a schema.
This function lets you query for documents that are contained inside a
named folder without fear of collision with other categories.
Args:
email: string The email of the user who owns the folder.
folder_name: string The name of the folder.
Returns:
The string of the category that was added to the object.
"""
category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
self.categories.append(category)
return category
def RemoveNamedFolder(self, email, folder_name):
"""Removes a named folder category, qualified by a schema.
Args:
email: string The email of the user who owns the folder.
folder_name: string The name of the folder.
Returns:
The string of the category that was removed from the object.
"""
category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
self.categories.remove(category)
return category
class DocumentAclQuery(gdata.service.Query):
"""Object used to construct a URI to query a Document's ACL feed"""
def __init__(self, resource_id, feed='/feeds/acl/private/full'):
"""Constructor for Document ACL Query
Args:
resource_id: string The resource id. (e.g. 'document%3Adocument_id',
'spreadsheet%3Aspreadsheet_id', etc.)
feed: string (optional) The path for the feed.
(e.g. '/feeds/acl/private/full')
Yields:
A DocumentAclQuery object used to construct a URI based on the Document
ACL feed.
"""
self.resource_id = resource_id
gdata.service.Query.__init__(self, feed)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Document
ACL feed.
"""
return '%s/%s' % (gdata.service.Query.ToUri(self), self.resource_id)
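# --- Hypothetical usage sketch (not part of the original gdata module) ---
# Shows how the query classes defined above assemble feed URIs. The email,
# folder name and resource id below are placeholders, not values from the source.
if __name__ == '__main__':
    query = DocumentQuery(text_query='quarterly report')
    query.AddNamedFolder('user@example.com', 'reports')
    print(query.ToUri())
    acl_query = DocumentAclQuery('document%3Aexample_id')
    print(acl_query.ToUri())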
|
unknown
|
codeparrot/codeparrot-clean
| ||
#  MongoDB README
Welcome to MongoDB!
## Components
- `mongod` - The database server.
- `mongos` - Sharding router.
## Download MongoDB
- https://www.mongodb.com/try/download/community
- Using homebrew `brew tap mongodb/brew`
- Using docker image `docker pull mongodb/mongodb-community-server`
## Download the MongoDB Shell
- https://www.mongodb.com/try/download/shell
- Using homebrew `brew install mongosh`
## Building
See [Building MongoDB](docs/building.md).
## Running
For command line options invoke:
```bash
$ ./mongod --help
```
To run a single server database:
```bash
$ sudo mkdir -p /data/db
$ ./mongod
$
$ # The mongosh shell connects to localhost and test database by default:
$ ./mongosh
test> help
```
## Installing Compass
You can install compass using the `install_compass` script packaged with MongoDB:
```bash
$ ./install_compass
```
This will download the appropriate MongoDB Compass package for your platform
and install it.
## Drivers
Client drivers for most programming languages are available at
https://docs.mongodb.com/manual/applications/drivers/.
## Bug Reports
See https://github.com/mongodb/mongo/wiki/Submit-Bug-Reports.
## Packaging
Packages are created dynamically by the [buildscripts/packager.py](buildscripts/packager.py) script.
This will generate RPM and Debian packages.
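A minimal invocation sketch (not from the upstream docs; the exact flags depend on your checkout, and `--help` is only assumed here to list them):
```bash
$ python3 buildscripts/packager.py --help
```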
## Learn MongoDB
- Documentation - https://docs.mongodb.com/manual/
- Developer Center - https://www.mongodb.com/developer/
- MongoDB University - https://learn.mongodb.com
## Cloud Hosted MongoDB
https://www.mongodb.com/cloud/atlas
## Forums
- https://mongodb.com/community/forums/
Technical questions about using MongoDB.
- https://mongodb.com/community/forums/c/server-dev
Technical questions about building and developing MongoDB.
## LICENSE
MongoDB is free and the source is available. Versions released prior to
October 16, 2018 are published under the AGPL. All versions released after
October 16, 2018, including patch fixes for prior versions, are published
under the [Server Side Public License (SSPL) v1](LICENSE-Community.txt).
See individual files for details which will specify the license applicable
to each file. Files subject to the SSPL will be noted in their headers.
|
unknown
|
github
|
https://github.com/mongodb/mongo
|
README.md
|
## Input
```javascript
// @enablePreserveExistingMemoizationGuarantees:false
import {Stringify, identity} from 'shared-runtime';
function foo() {
try {
identity(`${Symbol('0')}`); // Uncaught TypeError: Cannot convert a Symbol value to a string (leave as is)
} catch {}
return (
<Stringify
value={[
`` === '',
`\n` === '\n',
`a\nb`,
`\n`,
`a${1}b`,
` abc \u0041\n\u000a\ŧ`,
`abc${1}def`,
`abc${1}def${2}`,
`abc${1}def${2}ghi`,
`a${1 + 3}b${``}c${'d' + `e${2 + 4}f`}`,
`1${2}${Math.sin(0)}`,
`${NaN}`,
`${Infinity}`,
`${-Infinity}`,
`${Number.MAX_SAFE_INTEGER}`,
`${Number.MIN_SAFE_INTEGER}`,
`${Number.MAX_VALUE}`,
`${Number.MIN_VALUE}`,
`${-0}`,
`
`,
`${{}}`,
`${[1, 2, 3]}`,
`${true}`,
`${false}`,
`${null}`,
`${undefined}`,
`123456789${0}`,
`${0}123456789`,
`${0}123456789${0}`,
`${0}1234${5}6789${0}`,
`${0}1234${`${0}123456789${`${0}123456789${0}`}`}6789${0}`,
`${0}1234${`${0}123456789${`${identity(0)}`}`}6789${0}`,
`${`${`${`${0}`}`}`}`,
`${`${`${`${''}`}`}`}`,
`${`${`${`${identity('')}`}`}`}`,
]}
/>
);
}
export const FIXTURE_ENTRYPOINT = {
fn: foo,
params: [],
isComponent: false,
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime"; // @enablePreserveExistingMemoizationGuarantees:false
import { Stringify, identity } from "shared-runtime";
function foo() {
const $ = _c(1);
try {
identity(`${Symbol("0")}`);
} catch {}
let t0;
if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
t0 = (
<Stringify
value={[
true,
true,
"a\nb",
"\n",
"a1b",
" abc A\n\n\u0167",
"abc1def",
"abc1def2",
"abc1def2ghi",
"a4bcde6f",
`1${2}${Math.sin(0)}`,
`${NaN}`,
`${Infinity}`,
`${-Infinity}`,
`${Number.MAX_SAFE_INTEGER}`,
`${Number.MIN_SAFE_INTEGER}`,
`${Number.MAX_VALUE}`,
`${Number.MIN_VALUE}`,
"0",
"\n ",
`${{}}`,
`${[1, 2, 3]}`,
"true",
"false",
"null",
`${undefined}`,
"1234567890",
"0123456789",
"01234567890",
"01234567890",
"0123401234567890123456789067890",
`${0}1234${`${0}123456789${`${identity(0)}`}`}6789${0}`,
"0",
"",
`${`${`${`${identity("")}`}`}`}`,
]}
/>
);
$[0] = t0;
} else {
t0 = $[0];
}
return t0;
}
export const FIXTURE_ENTRYPOINT = {
fn: foo,
params: [],
isComponent: false,
};
```
### Eval output
(kind: ok) <div>{"value":[true,true,"a\nb","\n","a1b"," abc A\n\nŧ","abc1def","abc1def2","abc1def2ghi","a4bcde6f","120","NaN","Infinity","-Infinity","9007199254740991","-9007199254740991","1.7976931348623157e+308","5e-324","0","\n ","[object Object]","1,2,3","true","false","null","undefined","1234567890","0123456789","01234567890","01234567890","0123401234567890123456789067890","012340123456789067890","0","",""]}</div>
|
unknown
|
github
|
https://github.com/facebook/react
|
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/constant-propagation-template-literal.expect.md
|
# -*- coding: utf-8 -*-
"""
pygments.styles.fruity
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "fruity" vim theme.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Token, Comment, Name, Keyword, \
Generic, Number, String, Whitespace
class FruityStyle(Style):
"""
Pygments version of the "fruity" vim theme.
"""
background_color = '#111111'
highlight_color = '#333333'
styles = {
Whitespace: '#888888',
Token: '#ffffff',
Generic.Output: '#444444 bg:#222222',
Keyword: '#fb660a bold',
Keyword.Pseudo: 'nobold',
Number: '#0086f7 bold',
Name.Tag: '#fb660a bold',
Name.Variable: '#fb660a',
Comment: '#008800 bg:#0f140f italic',
Name.Attribute: '#ff0086 bold',
String: '#0086d2',
Name.Function: '#ff0086 bold',
Generic.Heading: '#ffffff bold',
Keyword.Type: '#cdcaa9 bold',
Generic.Subheading: '#ffffff bold',
Name.Constant: '#0086d2',
Comment.Preproc: '#ff0007 bold'
}
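# --- Hypothetical usage sketch (not part of the original Pygments module) ---
# Renders a tiny snippet to HTML with the style defined above; requires the
# pygments package to be installed.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    html = highlight("print('hello')", PythonLexer(), HtmlFormatter(style=FruityStyle))
    print(html[:80])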
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# ale_python_test_pygame_player.py
# Author: Ben Goodrich
#
# This modified ale_python_test_pygame.py to provide a fully interactive experience allowing the player
# to play. RAM Contents, current action, and reward are also displayed.
# keys are:
# arrow keys -> up/down/left/right
# z -> fire button
import sys
from ale_python_interface import ALEInterface
import numpy as np
import pygame
key_action_tform_table = (
0, #00000 none
2, #00001 up
5, #00010 down
2, #00011 up/down (invalid)
4, #00100 left
7, #00101 up/left
9, #00110 down/left
7, #00111 up/down/left (invalid)
3, #01000 right
6, #01001 up/right
8, #01010 down/right
6, #01011 up/down/right (invalid)
3, #01100 left/right (invalid)
6, #01101 left/right/up (invalid)
8, #01110 left/right/down (invalid)
6, #01111 up/down/left/right (invalid)
1, #10000 fire
10, #10001 fire up
13, #10010 fire down
10, #10011 fire up/down (invalid)
12, #10100 fire left
15, #10101 fire up/left
17, #10110 fire down/left
15, #10111 fire up/down/left (invalid)
11, #11000 fire right
14, #11001 fire up/right
16, #11010 fire down/right
14, #11011 fire up/down/right (invalid)
11, #11100 fire left/right (invalid)
14, #11101 fire left/right/up (invalid)
16, #11110 fire left/right/down (invalid)
14 #11111 fire up/down/left/right (invalid)
)
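# Illustrative check (not in the original script): the table is indexed by a
# 5-bit mask ordered (fire, right, left, down, up), so "up" alone (0b00001)
# maps to ALE action 2 and "fire"+"up" (0b10001) maps to action 10.
assert key_action_tform_table[0b00001] == 2
assert key_action_tform_table[0b10001] == 10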
if(len(sys.argv) > 2):
print("Usage ./ale_python_test_pygame_player.py <ROM_FILE_NAME>")
sys.exit()
ale = ALEInterface()
max_frames_per_episode = ale.getInt(b"max_num_frames_per_episode");
##ale.set("random_seed",123)
# Get & Set the desired settings
ale.setInt(b'random_seed', 123)
ale.setInt(b'frame_skip', 1)
# Set USE_SDL to true to display the screen. ALE must be compilied
# with SDL enabled for this to work. On OSX, pygame init is used to
# proxy-call SDL_main.
USE_SDL = True
if USE_SDL:
if sys.platform == 'darwin':
pygame.init()
ale.setBool(b'sound', False) # Sound doesn't work on OSX
elif sys.platform.startswith('linux'):
ale.setBool(b'sound', True)
ale.setBool(b'display_screen', True)
random_seed = ale.getInt(b"random_seed")
print("random_seed: " + str(random_seed))
##rom_file = str.encode(sys.argv[1])
##ale.loadROM(rom_file)
##print(rom_file)
ale.loadROM(b"/Users/logancross/Documents/Atari/Arcade-Learning-Environment-master/ale_python_interface/examples/space_invaders.bin")
legal_actions = ale.getMinimalActionSet()
print(legal_actions)
(screen_width,screen_height) = ale.getScreenDims()
print("width/height: " +str(screen_width) + "/" + str(screen_height))
(display_width,display_height) = (1024,420)
#init pygame
pygame.init()
screen = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption("Arcade Learning Environment Player Agent Display")
pygame.key.set_repeat(1,5)
game_surface = pygame.Surface((screen_width,screen_height))
pygame.display.flip()
#init clock
clock = pygame.time.Clock()
episode = 0
total_reward = 0.0
while(episode < 10):
#get the keys
a = 0;
keys = 0
pressed = pygame.key.get_pressed()
keys |= pressed[pygame.K_UP]
keys |= pressed[pygame.K_DOWN] <<1
keys |= pressed[pygame.K_LEFT] <<2
keys |= pressed[pygame.K_RIGHT] <<3
keys |= pressed[pygame.K_z] <<4
b = key_action_tform_table[keys]
for event in pygame.event.get():
##print(event)
if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
a = 2;
elif event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
a = 5;
elif event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:
a = 4;
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:
a = 3;
##print("right")
elif event.type == pygame.KEYDOWN and event.key == pygame.K_z:
a = 1;
elif event.type == pygame.QUIT:
episode = 10
break;
else:
a = 0;
if pressed[pygame.K_UP]: b = 2 #######
if pressed[pygame.K_DOWN]: b = 5 #######
if pressed[pygame.K_LEFT]: b = 4 #######
if pressed[pygame.K_RIGHT]: b = 3 #######
if pressed[pygame.K_z]: b = 1 #######
reward = ale.act(a);
total_reward += reward
#clear screen
screen.fill((0,0,0))
#get atari screen pixels and blit them
numpy_surface = np.frombuffer(game_surface.get_buffer(),dtype=np.int32)
ale.getScreenRGB(numpy_surface)
del numpy_surface
screen.blit(pygame.transform.scale2x(game_surface),(0,0))
#get RAM
ram_size = ale.getRAMSize()
ram = np.zeros((ram_size),dtype=np.uint8)
ale.getRAM(ram)
#Display ram bytes
font = pygame.font.SysFont("Ubuntu Mono",32)
text = font.render("RAM: " ,1,(255,208,208))
screen.blit(text,(330,10))
font = pygame.font.SysFont("Ubuntu Mono",25)
height = font.get_height()*1.2
line_pos = 40
ram_pos = 0
while(ram_pos < 128):
ram_string = ''.join(["%02X "%ram[x] for x in range(ram_pos,min(ram_pos+16,128))])
text = font.render(ram_string,1,(255,255,255))
screen.blit(text,(340,line_pos))
line_pos += height
ram_pos +=16
pygame.display.flip()
#delay to 60fps
clock.tick(60.)
if(ale.game_over()):
episode_frame_number = ale.getEpisodeFrameNumber()
frame_number = ale.getFrameNumber()
print("Frame Number: " + str(frame_number) + " Episode Frame Number: " + str(episode_frame_number))
print("Episode " + str(episode) + " ended with score: " + str(total_reward))
ale.reset_game()
total_reward = 0.0
episode = episode + 1
|
unknown
|
codeparrot/codeparrot-clean
| ||
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Animations</title>
<base href="/" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<link rel="icon" type="image/x-icon" href="favicon.ico" />
</head>
<body>
<app-root></app-root>
</body>
</html>
|
html
|
github
|
https://github.com/angular/angular
|
integration/legacy-animations-async/src/index.html
|
# -*- coding: utf-8 -*-
# For testing purposes: build a data projection with a limited number of nodes
def json_semantic(request):
G = nx.Graph() # Create an empty NetworkX graph
# Create a cursor object that lets us connect to and work with the database
# holding the multidimensional matrix data
cursor = connections['mysql'].cursor()
offset = 0 # Offset of the first returned row
rows = 100 # Maximum number of rows to return
# Build an SQL query against the elements table, which holds information objects (IOs).
# Objects that have no relations (ent_or_rel=0) are the vertices of our graph
sql = "SELECT el.id, el.data FROM elements as el WHERE el.ent_or_rel=0 LIMIT "+str(offset)+","+str(rows)
cursor.execute(sql) # Execute the SQL query
nodes = cursor.fetchall() # Fetch the rows returned by the query
# Iterate over every row of the query result
# and add the nodes to the graph
for node in nodes:
# Call the helper that adds a node to the graph, where:
# node[0] - the node id;
# G - the graph;
# node[1] - the optional data field, used as one of the node attributes;
add_node_from_db(node[0], G, node[1])
# Next, find this node's edges and add them to the graph:
# build an SQL query against the relations table, which describes links between IOs,
# and the elements table, from which the data field provides the textual label of a link.
# These links are the edges of our graph.
sql = "SELECT rel.id, rel.arg1, rel.arg2, el.data FROM relations as rel, elements as el WHERE rel.id = el.id AND (rel.arg1="+str(node[0])+" OR rel.arg2="+str(node[0])+")"
cursor.execute(sql) # Execute the SQL query
edges = cursor.fetchall() # Fetch the rows returned by the query
# Iterate over all rows of the query result and add the edges to the graph.
for edge in edges:
# For each edge, obtain its attribute dictionary via a separate helper.
edgeAttributes = get_edge_attributes(edge[0])
# Add an edge to the graph with the id and data attributes,
# plus the separately obtained attribute dictionary (attributes).
# One of the edge's endpoints may not have been added to the graph yet;
# in that case add_edge() will create the node automatically,
# but without the required attributes: calling add_node_from_db() fixes this.
G.add_edge(edge[1], edge[2], id=edge[0], data=edge[3], attributes=edgeAttributes)
add_node_from_db(int(edge[1]), G) # Attach the required attributes to the edge's first node
add_node_from_db(int(edge[2]), G) # Attach the required attributes to the edge's second node
# Using the NetworkX library,
# export the graph in a form suitable for JSON serialization
graphData = json_graph.node_link_data(G)
# Convert the data to JSON
result = json.dumps(graphData, sort_keys=True, indent=4, separators=(',', ': '))
response = HttpResponse() # Create the response object used to build the HTML page dynamically
response['Content-Type'] = "text/javascript; charset=utf-8" # Declare the basic metadata of the HTML page
response.write(result) # Write the resulting graph structure, as JSON, into the response object
# Return everything the Django framework needs to finish generating the HTML page
return response
# For testing purposes:
# Build a graph - a multidimensional projection of the "semantic heap" - using the first method
def create_graph_method_01():
G = nx.Graph() # Create an empty NetworkX graph
# Create a cursor object that lets us connect to and work with the database
# holding the multidimensional matrix data
cursor = connections['mysql'].cursor()
# Build an SQL query against the elements table, which holds information objects (IOs).
# Objects that have no relations (ent_or_rel=0) are the vertices of our graph
sql = "SELECT el.id, el.data FROM elements as el WHERE el.ent_or_rel=0"
cursor.execute(sql) # Execute the SQL query
nodes = cursor.fetchall() # Fetch the rows returned by the query
# Iterate over every row of the query result
# and add the nodes to the graph
for node in nodes:
# Call the helper that adds a node to the graph, where:
# node[0] - the node id;
# G - the graph;
# node[1] - the optional data field, used as one of the node attributes;
add_node_from_db(node[0], G, node[1])
# Next, find this node's edges and add them to the graph:
# build an SQL query against the relations table, which describes links between IOs,
# and the elements table, from which the data field provides the textual label of a link.
# These links are the edges of our graph.
sql = "SELECT rel.id, rel.arg1, rel.arg2, el.data FROM relations as rel, elements as el WHERE rel.id = el.id AND (rel.arg1="+str(node[0])+" OR rel.arg2="+str(node[0])+")"
cursor.execute(sql) # Execute the SQL query
edges = cursor.fetchall() # Fetch the rows returned by the query
# Iterate over all rows of the query result and add the edges to the graph.
for edge in edges:
# For each edge, obtain its attribute dictionary via a separate helper.
edgeAttributes = get_edge_attributes_from_db(edge[0])
# Add an edge to the graph with the id and data attributes,
# plus the separately obtained attribute dictionary (attributes).
# One of the edge's endpoints may not have been added to the graph yet;
# in that case add_edge() will create the node automatically,
# but without the required attributes: calling add_node_from_db() fixes this.
G.add_edge(edge[1], edge[2], id=edge[0], data=edge[3], attributes=edgeAttributes)
add_node_from_db(int(edge[1]), G) # Attach the required attributes to the edge's first node
add_node_from_db(int(edge[2]), G) # Attach the required attributes to the edge's second node
# Using the NetworkX library,
# export the graph in a form suitable for JSON serialization
data = json_graph.node_link_data(G)
# Create a Graph model instance to store the graph structure in the database
graph = Graph()
graph.title = "Semantic" # Set the graph title
graph.body = json.dumps(data) # Convert the data to JSON
graph.save() # Save the graph to our own database
return True
# For testing purposes:
# Build a graph - a multidimensional projection of the "semantic heap" - using the second method
def create_graph_method_02():
# Create an empty NetworkX graph
G = nx.Graph()
# Open a connection to the database that stores the semantically linked data
cursor = connections['mysql'].cursor()
sql = "SELECT rel.id, rel.arg1, rel.arg2, el.data FROM relations as rel, elements as el WHERE rel.id = el.id"
cursor.execute(sql) # Execute the SQL query
edges = cursor.fetchall() # Fetch the rows returned by the query
# Iterate over all rows of the query result and add the edges to the graph.
for edge in edges:
# For each edge, obtain its attribute dictionary via a separate helper.
edgeAttributes = get_edge_attributes_from_db(edge[0])
G.add_edge(edge[1], edge[2], id=edge[0], data=edge[3], attributes=edgeAttributes)
add_node_from_db(int(edge[1]), G)
add_node_from_db(int(edge[2]), G)
# Using the NetworkX library,
# export the graph in a form suitable for JSON serialization
data = json_graph.node_link_data(G)
# Create a Graph model instance to store the graph structure in the database
graph = Graph()
# Set the graph title
graph.title = "Multidimensional projection of the 'semantic heap' for the given filter"
# Convert the data to JSON
graph.body = json.dumps(data)
# Save the graph to our own database
graph.save()
return True
"""
class ObrIndicatorSerializer(serializers.ModelSerializer):
class Meta:
model = ObrIndicator
fields = ('pk', 'stage', 'procent')
order_by = 'pk'
class ObrProgressSerializer(serializers.ModelSerializer):
indicator = ObrIndicatorSerializer(many=True, read_only=True)
class Meta:
model = Obr
fields = ('pk', 'status', 'statusText', 'fileData', 'indicator')
class CamSerializer(serializers.ModelSerializer):
class Meta:
model = Cam
fields = ('id',
'fileData',
'modelKamery',
'dataiVremjaSjomki',
'kolvoKadrovvRolike',
'kolvoStrok',
'kolvoTochekvStroke',
'temperaturnajaShkala',
'stepenChernoty',
'temperaturaFona',
'prozrachnostSredy',
'temperaturaSredy',
'distancijaSjomki',
'razmerPikselja',
'apertura',
'fokusnoeRasstojanie',
)
"""
|
unknown
|
codeparrot/codeparrot-clean
| ||
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _, ungettext
from mss.lib.xmlrpc import XmlRpc
xmlrpc = XmlRpc()
logger = logging.getLogger(__name__)
class Steps:
PREINST = "preinst"
DOWNLOAD = "download"
MEDIAS_AUTH = "medias_auth"
MEDIAS_ADD = "medias_add"
INSTALL = "install"
CONFIG = "config"
END = "end"
class State:
DISABLED = -1
TODO = 0
DONE = 1
class Transaction(object):
def __init__(self, request, modules_list=None):
if modules_list is not None:
self.modules_list = modules_list
self.setup()
else:
self.modules_list = request.session['modules_list']
self.steps = request.session['steps']
self.update()
self.save(request)
def setup(self):
result = xmlrpc.call('preinstall_modules', self.modules_list)
self.modules_info = result
self.modules_list = [m['slug'] for m in self.modules_info]
self.steps = [
{
'id': Steps.PREINST,
'state': State.TODO,
'title': _("Installation summary"),
'info': ungettext(
"The following addon will be installed.",
"The following addons will be installed.",
len(self.modules_list)
),
'show_modules': True,
'current': False
},
{
'id': Steps.DOWNLOAD,
'state': State.DISABLED,
'title': _("Addon download"),
'info': _("Download addons from the ServicePlace"),
'current': False,
},
{
'id': Steps.MEDIAS_AUTH,
'state': State.DISABLED,
'title': _("Medias authentication"),
'info': _("One or more medias need authentication"),
'current': False,
},
{
'id': Steps.MEDIAS_ADD,
'state': State.DISABLED,
'title': _("Medias add"),
'info': "",
'current': False,
},
{
'id': Steps.INSTALL,
'state': State.DISABLED,
'title': _("Installation"),
'info': "",
'current': False,
},
{
'id': Steps.CONFIG,
'state': State.DISABLED,
'title': _("Initial configuration"),
'info': "",
'current': False
},
{
'id': Steps.END,
'state': State.TODO,
'title': _("End of installation"),
'info': _("The installation is finished."),
'reboot': False,
'current': False
}
]
for module in self.modules_info:
if not module['installed'] or not module["downloaded"]:
self.todo_step(Steps.INSTALL)
if not module["downloaded"]:
self.todo_step(Steps.DOWNLOAD)
if module["has_repositories"]:
self.todo_step(Steps.MEDIAS_ADD)
if module["has_restricted_repositories"]:
self.todo_step(Steps.MEDIAS_AUTH)
if module["has_configuration"] or module["has_configuration_script"] or not module["downloaded"]:
self.todo_step(Steps.CONFIG)
if module['reboot']:
self.update_step({'id': Steps.END,
'title': _("Reboot"),
'reboot': True,
'info': _("The installation is finished. The server must be rebooted.")})
def update(self):
self.modules_info = xmlrpc.call('get_modules_details', self.modules_list)
downloaded = True
has_repositories = False
has_restricted_repositories = False
installed = True
configured = True
for module in self.modules_info:
if not module['downloaded']:
downloaded = False
if module['has_repositories']:
has_repositories = True
if module['has_restricted_repositories']:
has_restricted_repositories = True
if not module['installed']:
installed = False
if module["has_configuration_script"] and not module['configured']:
configured = False
if not module['downloaded']:
configured = False
if self.get_state_step(Steps.DOWNLOAD) == State.TODO and downloaded:
self.done_step(Steps.DOWNLOAD)
if self.get_state_step(Steps.INSTALL) == State.TODO and installed:
self.done_step(Steps.INSTALL)
if self.get_state_step(Steps.MEDIAS_AUTH) == State.TODO and not has_restricted_repositories:
self.done_step(Steps.MEDIAS_AUTH)
if self.get_state_step(Steps.MEDIAS_ADD) == State.TODO and not has_repositories:
self.done_step(Steps.MEDIAS_ADD)
if self.get_state_step(Steps.CONFIG) == State.TODO and configured:
self.done_step(Steps.CONFIG)
def save(self, request):
request.session['modules_list'] = self.modules_list
request.session['steps'] = self.steps
def find_step(self, step):
for s in self.steps:
if s['id'] == step:
return s
raise Exception("Step does not exist ?!")
def get_state_step(self, step):
return self.find_step(step)['state']
def todo_step(self, step):
self.find_step(step)['state'] = State.TODO
def done_step(self, step):
self.find_step(step)['state'] = State.DONE
def update_step(self, step):
for s in self.steps:
if s['id'] == step['id']:
for key, value in step.items():
s[key] = value
def current_step(self):
for s in self.steps:
if s['current']:
return s
def set_current_step(self, step):
for s in self.steps:
if s['id'] == step:
s['current'] = True
else:
s['current'] = False
def first_step(self):
for step in self.steps:
if not step['state'] in (State.DONE, State.DISABLED):
return step
def next_step(self):
next = False
for step in self.steps:
if next and not step['state'] in (State.DONE, State.DISABLED):
return step
if step['current']:
next = True
# no next step, return home
return {'id': 'sections'}
def next_step_url(self):
return reverse(self.next_step()['id'])
def first_step_url(self):
return reverse(self.first_step()['id'])
def current_step_url(self):
return reverse(self.current_step()['id'])
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating k-means clustering.
Run with:
bin/spark-submit examples/src/main/python/ml/kmeans_example.py
This example requires NumPy (http://www.numpy.org/).
"""
from __future__ import print_function
# $example on$
from pyspark.ml.clustering import KMeans
from pyspark.ml.evaluation import ClusteringEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("KMeansExample")\
.getOrCreate()
# $example on$
# Loads data.
dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")
# Trains a k-means model.
kmeans = KMeans().setK(2).setSeed(1)
model = kmeans.fit(dataset)
# Make predictions
predictions = model.transform(dataset)
# Evaluate clustering by computing Silhouette score
evaluator = ClusteringEvaluator()
silhouette = evaluator.evaluate(predictions)
print("Silhouette with squared euclidean distance = " + str(silhouette))
# Shows the result.
centers = model.clusterCenters()
print("Cluster Centers: ")
for center in centers:
print(center)
# $example off$
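# Illustrative addition (not part of the upstream example): show how many points
# fell into each cluster, using the "prediction" column added by transform().
predictions.groupBy("prediction").count().show()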
spark.stop()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .container_auto_param import ContainerAutoParam
from .local_environment_param import LocalEnvironmentParam
from .container_reference_param import ContainerReferenceParam
__all__ = ["FunctionShellToolParam", "Environment"]
Environment: TypeAlias = Union[ContainerAutoParam, LocalEnvironmentParam, ContainerReferenceParam]
class FunctionShellToolParam(TypedDict, total=False):
"""A tool that allows the model to execute shell commands."""
type: Required[Literal["shell"]]
"""The type of the shell tool. Always `shell`."""
environment: Optional[Environment]
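# --- Hypothetical usage sketch (not part of the generated module) ---
# A literal that satisfies FunctionShellToolParam: "type" is Required, while
# "environment" may be omitted because the TypedDict uses total=False.
example_shell_tool: FunctionShellToolParam = {"type": "shell"}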
|
python
|
github
|
https://github.com/openai/openai-python
|
src/openai/types/responses/function_shell_tool_param.py
|
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_JIT_PJRT_TENSOR_BUFFER_H_
#define TENSORFLOW_COMPILER_JIT_PJRT_TENSOR_BUFFER_H_
#include <memory>
#include <utility>
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
// PjRtTensorBuffer is derived from TensorBuffer, which holds a device memory
// pointer so that legacy TF kernel can access it directly. PjRtTensorBuffer
// also owns a PjRtBuffer for XLA kernel's usage.
class PjRtTensorBuffer : public TensorBuffer {
public:
PjRtTensorBuffer(const void* ptr, size_t expected_size,
std::unique_ptr<xla::PjRtBuffer> pjrt_buffer)
: TensorBuffer(const_cast<void*>(ptr)),
expected_size_(expected_size),
pjrt_buffer_(std::move(pjrt_buffer)) {}
size_t size() const override { return expected_size_; }
TensorBuffer* root_buffer() override { return this; }
xla::PjRtBuffer* pjrt_buffer() const { return pjrt_buffer_.get(); }
// TODO(b/288965065): Implement this.
void FillAllocationDescription(AllocationDescription* proto) const override {
proto->set_requested_bytes(static_cast<int64_t>(expected_size_));
}
private:
size_t expected_size_;
std::unique_ptr<xla::PjRtBuffer> pjrt_buffer_;
};
} // namespace tensorflow
#endif // TENSORFLOW_COMPILER_JIT_PJRT_TENSOR_BUFFER_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/compiler/jit/pjrt_tensor_buffer.h
|
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.bootstrap;
import java.util.function.Supplier;
import org.jspecify.annotations.Nullable;
import org.springframework.context.ApplicationContext;
import org.springframework.core.env.Environment;
/**
* A simple bootstrap context that is available during startup and {@link Environment}
* post-processing up to the point that the {@link ApplicationContext} is prepared.
* <p>
* Provides lazy access to singletons that may be expensive to create, or need to be
* shared before the {@link ApplicationContext} is available.
* <p>
* Instances are registered by type. The context may return {@code null} values when a
* type has been registered but no value is actually supplied.
*
* @author Phillip Webb
* @since 4.0.0
* @since 2.4.0
* @see BootstrapRegistry
*/
public interface BootstrapContext {
/**
* Return an instance from the context if the type has been registered. The instance
* will be created if it hasn't been accessed previously.
* @param <T> the instance type
* @param type the instance type
* @return the instance managed by the context, which may be {@code null}
* @throws IllegalStateException if the type has not been registered
*/
<T> @Nullable T get(Class<T> type) throws IllegalStateException;
/**
* Return an instance from the context if the type has been registered. The instance
* will be created if it hasn't been accessed previously.
* @param <T> the instance type
* @param type the instance type
* @param other the instance to use if the type has not been registered
* @return the instance, which may be {@code null}
*/
<T> @Nullable T getOrElse(Class<T> type, @Nullable T other);
/**
* Return an instance from the context if the type has been registered. The instance
* will be created if it hasn't been accessed previously.
* @param <T> the instance type
* @param type the instance type
* @param other a supplier for the instance to use if the type has not been registered
* @return the instance, which may be {@code null}
*/
<T> @Nullable T getOrElseSupply(Class<T> type, Supplier<@Nullable T> other);
/**
* Return an instance from the context if the type has been registered. The instance
* will be created if it hasn't been accessed previously.
* @param <T> the instance type
* @param <X> the exception to throw if the type is not registered
* @param type the instance type
* @param exceptionSupplier the supplier which will return the exception to be thrown
* @return the instance managed by the context, which may be {@code null}
* @throws X if the type has not been registered
*/
<T, X extends Throwable> @Nullable T getOrElseThrow(Class<T> type, Supplier<? extends X> exceptionSupplier)
throws X;
/**
* Return if a registration exists for the given type.
* @param <T> the instance type
* @param type the instance type
* @return {@code true} if the type has already been registered
*/
<T> boolean isRegistered(Class<T> type);
}
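// Hypothetical usage sketch (not part of the Spring Boot source): reading an
// optional value from a BootstrapContext and falling back to a default when
// nothing was registered for the type. The class and values are illustrative.
class BootstrapContextUsageExample {
    String resolveTimeout(BootstrapContext context) {
        // Returns the registered String instance, or "30s" if none was registered.
        return context.getOrElse(String.class, "30s");
    }
}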
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
core/spring-boot/src/main/java/org/springframework/boot/bootstrap/BootstrapContext.java
|
"""
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = DIRECTORY_URL + "samples.zip"
COVERAGES_URL = DIRECTORY_URL + "coverages.zip"
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
M[M == nodata] = -9999  # replace the file's NODATA sentinel cells with -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
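# --- Hypothetical usage sketch (not part of the original module) ---
# Fetches the cached dataset and rebuilds the longitude/latitude grid described
# above. The first call downloads the archives, so it needs network access.
if __name__ == '__main__':
    data = fetch_species_distributions()
    xgrid, ygrid = construct_grids(data)
    print(len(xgrid), len(ygrid))        # number of grid points along x and y
    print(data.train['species'][:3])     # a few training species labels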
|
unknown
|
codeparrot/codeparrot-clean
| ||
---
name: Push README to Docker Hub
on:
push:
paths:
- "README.md"
- "README-containers.md"
- ".github/workflows/container_description.yml"
branches: [ main, master ]
permissions:
contents: read
jobs:
PushDockerHubReadme:
runs-on: ubuntu-latest
name: Push README to Docker Hub
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- name: Set docker hub repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to Dockerhub
uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
env:
DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }}
DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }}
with:
destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
provider: dockerhub
short_description: ${{ env.DOCKER_REPO_NAME }}
# Empty string results in README-containers.md being pushed if it
# exists. Otherwise, README.md is pushed.
readme_file: ''
PushQuayIoReadme:
runs-on: ubuntu-latest
name: Push README to quay.io
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- name: Set quay.io org name
run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
- name: Set quay.io repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to quay.io
uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
env:
DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }}
with:
destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
provider: quay
# Empty string results in README-containers.md being pushed if it
# exists. Otherwise, README.md is pushed.
readme_file: ''
|
unknown
|
github
|
https://github.com/prometheus/prometheus
|
.github/workflows/container_description.yml
|
// @validatePreserveExistingMemoizationGuarantees @validateExhaustiveMemoizationDependencies:false
import {useCallback, useRef} from 'react';
function useCustomRef() {
return useRef({click: () => {}});
}
function Foo() {
const notaref = useCustomRef();
const onClick = useCallback(() => {
notaref.current?.click();
}, []);
return <button onClick={onClick} />;
}
export const FIXTURE_ENTRYPOINT = {
fn: Foo,
params: [],
isComponent: true,
};
|
javascript
|
github
|
https://github.com/facebook/react
|
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/error.ref-like-name-not-a-ref.js
|
#!/usr/bin/env python
import json
import boto3
cfn = boto3.client('cloudformation')
paginator = cfn.get_paginator('describe_stacks')
pages = paginator.paginate(StackName="antioch-prod")
details = {}
for page in pages:
for stack in page['Stacks']:
for output in stack['Outputs']:
details[output['OutputKey']] = output['OutputValue']
with open('zappa_settings.json') as f:
cfg = json.loads(f.read())
cfg['prod']['vpc_config']['SecurityGroupIds'] = [details['WebappSecurityGroup']]
cfg['prod']['vpc_config']['SubnetIds'] = [
details['PrivateSubnet1AID'],
details['PrivateSubnet2AID'],
details['PrivateSubnet3AID']
]
cfg['prod']['environment_variables']['DB_HOST'] = details['DatabaseHost']
cfg['prod']['environment_variables']['DB_PORT'] = details['DatabasePort']
cfg['prod']['environment_variables']['STATIC_BUCKET'] = details['StaticBucketName']
with open('zappa_settings.json', 'w') as f:
f.write(json.dumps(cfg, indent=4))
print("Updated zappa_settings.json with stack variables.")
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.aruba.aruba import aruba_provider_spec
from ansible.module_utils.network.common.utils import load_provider
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
provider = load_provider(aruba_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'aruba'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
        # make sure we are in the right cli context, which should be
        # enable mode and not config mode
conn = Connection(socket_path)
out = conn.get_prompt()
if to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
task_vars['ansible_socket'] = socket_path
if self._play_context.become_method == 'enable':
self._play_context.become = False
self._play_context.become_method = None
result = super(ActionModule, self).run(task_vars=task_vars)
return result
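# Hedged illustration -- the dict below is not part of the upstream plugin;
# the values are placeholders. The keys mirror what run() reads from the
# provider spec, and the comments restate the fallbacks applied above.
_EXAMPLE_PROVIDER = {
    'host': '192.0.2.10',   # falls back to the play's remote_addr
    'port': 22,             # falls back to the play's port, then 22
    'username': 'admin',    # falls back to the play's connection user
    'password': 'secret',   # falls back to the play's password
    'ssh_keyfile': None,    # falls back to the play's private_key_file
    'timeout': 30,          # falls back to C.PERSISTENT_COMMAND_TIMEOUT
}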
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# Please note that these reports are not multi-currency !!!
#
from openerp.osv import fields,osv
from openerp import tools
class purchase_report(osv.osv):
_name = "purchase.report"
_description = "Purchases Orders"
_auto = False
_columns = {
'date': fields.datetime('Order Date', readonly=True, help="Date on which this document has been created"), # TDE FIXME master: rename into date_order
'state': fields.selection([('draft', 'Request for Quotation'),
('confirmed', 'Waiting Supplier Ack'),
('approved', 'Approved'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')],'Order Status', readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'picking_type_id': fields.many2one('stock.warehouse', 'Warehouse', readonly=True),
'location_id': fields.many2one('stock.location', 'Destination', readonly=True),
'partner_id':fields.many2one('res.partner', 'Supplier', readonly=True),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', readonly=True),
'date_approve':fields.date('Date Approved', readonly=True),
'expected_date':fields.date('Expected Date', readonly=True),
'validator' : fields.many2one('res.users', 'Validated By', readonly=True),
'product_uom' : fields.many2one('product.uom', 'Reference Unit of Measure', required=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'user_id':fields.many2one('res.users', 'Responsible', readonly=True),
'delay':fields.float('Days to Validate', digits=(16,2), readonly=True),
'delay_pass':fields.float('Days to Deliver', digits=(16,2), readonly=True),
'quantity': fields.integer('Unit Quantity', readonly=True), # TDE FIXME master: rename into unit_quantity
'price_total': fields.float('Total Price', readonly=True),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'negociation': fields.float('Purchase-Standard Price', readonly=True, group_operator="avg"),
'price_standard': fields.float('Products Value', readonly=True, group_operator="sum"),
'nbr': fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines
'category_id': fields.many2one('product.category', 'Category', readonly=True)
}
_order = 'date desc, price_total desc'
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'purchase_report')
cr.execute("""
create or replace view purchase_report as (
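                -- currency_rate: each stored rate with its validity window, valid
                -- from its own date (date_start) until the next rate for the same
                -- currency (date_end, NULL while it is still the latest rate)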
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
select
min(l.id) as id,
s.date_order as date,
l.state,
s.date_approve,
s.minimum_planned_date as expected_date,
s.dest_address_id,
s.pricelist_id,
s.validator,
spt.warehouse_id as picking_type_id,
s.partner_id as partner_id,
s.create_uid as user_id,
s.company_id as company_id,
l.product_id,
t.categ_id as category_id,
t.uom_id as product_uom,
s.location_id as location_id,
sum(l.product_qty/u.factor*u2.factor) as quantity,
extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
count(*) as nbr,
sum(l.price_unit/cr.rate*l.product_qty)::decimal(16,2) as price_total,
avg(100.0 * (l.price_unit/cr.rate*l.product_qty) / NULLIF(ip.value_float*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
sum(ip.value_float*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
(sum(l.product_qty*l.price_unit/cr.rate)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
from purchase_order_line l
join purchase_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
LEFT JOIN ir_property ip ON (ip.name='standard_price' AND ip.res_id=CONCAT('product.template,',t.id) AND ip.company_id=s.company_id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
left join stock_picking_type spt on (spt.id=s.picking_type_id)
join currency_rate cr on (cr.currency_id = s.currency_id and
cr.date_start <= coalesce(s.date_order, now()) and
(cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
group by
s.company_id,
s.create_uid,
s.partner_id,
u.factor,
s.location_id,
l.price_unit,
s.date_approve,
l.date_planned,
l.product_uom,
s.minimum_planned_date,
s.pricelist_id,
s.validator,
s.dest_address_id,
l.product_id,
t.categ_id,
s.date_order,
l.state,
spt.warehouse_id,
u.uom_type,
u.category_id,
t.uom_id,
u.id,
u2.factor
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*-------------------------------------------------------------------------
*
* pg_verifybackup.c
* Verify a backup against a backup manifest.
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/bin/pg_verifybackup/pg_verifybackup.c
*
*-------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include <dirent.h>
#include <fcntl.h>
#include <limits.h>
#include <sys/stat.h>
#include <time.h>
#include "access/xlog_internal.h"
#include "common/logging.h"
#include "common/parse_manifest.h"
#include "fe_utils/simple_list.h"
#include "getopt_long.h"
#include "pg_verifybackup.h"
#include "pgtime.h"
/*
* For efficiency, we'd like our hash table containing information about the
* manifest to start out with approximately the correct number of entries.
* There's no way to know the exact number of entries without reading the whole
* file, but we can get an estimate by dividing the file size by the estimated
* number of bytes per line.
*
* This could be off by about a factor of two in either direction, because the
* checksum algorithm has a big impact on the line lengths; e.g. a SHA512
* checksum is 128 hex bytes, whereas a CRC-32C value is only 8, and there
* might be no checksum at all.
*/
#define ESTIMATED_BYTES_PER_MANIFEST_LINE 100
/*
* How many bytes should we try to read from a file at once?
*/
#define READ_CHUNK_SIZE (128 * 1024)
/*
* Tar file information needed for content verification.
*/
typedef struct tar_file
{
char *relpath;
Oid tblspc_oid;
pg_compress_algorithm compress_algorithm;
} tar_file;
static manifest_data *parse_manifest_file(char *manifest_path);
static void verifybackup_version_cb(JsonManifestParseContext *context,
int manifest_version);
static void verifybackup_system_identifier(JsonManifestParseContext *context,
uint64 manifest_system_identifier);
static void verifybackup_per_file_cb(JsonManifestParseContext *context,
const char *pathname, uint64 size,
pg_checksum_type checksum_type,
int checksum_length,
uint8 *checksum_payload);
static void verifybackup_per_wal_range_cb(JsonManifestParseContext *context,
TimeLineID tli,
XLogRecPtr start_lsn,
XLogRecPtr end_lsn);
pg_noreturn static void report_manifest_error(JsonManifestParseContext *context,
const char *fmt,...)
pg_attribute_printf(2, 3);
static void verify_tar_backup(verifier_context *context, DIR *dir);
static void verify_plain_backup_directory(verifier_context *context,
char *relpath, char *fullpath,
DIR *dir);
static void verify_plain_backup_file(verifier_context *context, char *relpath,
char *fullpath);
static void verify_control_file(const char *controlpath,
uint64 manifest_system_identifier);
static void precheck_tar_backup_file(verifier_context *context, char *relpath,
char *fullpath, SimplePtrList *tarfiles);
static void verify_tar_file(verifier_context *context, char *relpath,
char *fullpath, astreamer *streamer);
static void report_extra_backup_files(verifier_context *context);
static void verify_backup_checksums(verifier_context *context);
static void verify_file_checksum(verifier_context *context,
manifest_file *m, char *fullpath,
uint8 *buffer);
static void parse_required_wal(verifier_context *context,
char *pg_waldump_path,
char *wal_directory);
static astreamer *create_archive_verifier(verifier_context *context,
char *archive_name,
Oid tblspc_oid,
pg_compress_algorithm compress_algo);
static void progress_report(bool finished);
static void usage(void);
static const char *progname;
/* is progress reporting enabled? */
static bool show_progress = false;
/* Progress indicators */
static uint64 total_size = 0;
static uint64 done_size = 0;
/*
* Main entry point.
*/
int
main(int argc, char **argv)
{
static struct option long_options[] = {
{"exit-on-error", no_argument, NULL, 'e'},
{"ignore", required_argument, NULL, 'i'},
{"manifest-path", required_argument, NULL, 'm'},
{"format", required_argument, NULL, 'F'},
{"no-parse-wal", no_argument, NULL, 'n'},
{"progress", no_argument, NULL, 'P'},
{"quiet", no_argument, NULL, 'q'},
{"skip-checksums", no_argument, NULL, 's'},
{"wal-directory", required_argument, NULL, 'w'},
{NULL, 0, NULL, 0}
};
int c;
verifier_context context;
char *manifest_path = NULL;
bool no_parse_wal = false;
bool quiet = false;
char *wal_directory = NULL;
char *pg_waldump_path = NULL;
DIR *dir;
pg_logging_init(argv[0]);
set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_verifybackup"));
progname = get_progname(argv[0]);
memset(&context, 0, sizeof(context));
if (argc > 1)
{
if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
{
usage();
exit(0);
}
if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
{
puts("pg_verifybackup (PostgreSQL) " PG_VERSION);
exit(0);
}
}
/*
* Skip certain files in the toplevel directory.
*
* Ignore the backup_manifest file, because it's not included in the
* backup manifest.
*
* Ignore the pg_wal directory, because those files are not included in
* the backup manifest either, since they are fetched separately from the
* backup itself, and verified via a separate mechanism.
*
* Ignore postgresql.auto.conf, recovery.signal, and standby.signal,
* because we expect that those files may sometimes be created or changed
* as part of the backup process. For example, pg_basebackup -R will
* modify postgresql.auto.conf and create standby.signal.
*/
simple_string_list_append(&context.ignore_list, "backup_manifest");
simple_string_list_append(&context.ignore_list, "pg_wal");
simple_string_list_append(&context.ignore_list, "postgresql.auto.conf");
simple_string_list_append(&context.ignore_list, "recovery.signal");
simple_string_list_append(&context.ignore_list, "standby.signal");
while ((c = getopt_long(argc, argv, "eF:i:m:nPqsw:", long_options, NULL)) != -1)
{
switch (c)
{
case 'e':
context.exit_on_error = true;
break;
case 'i':
{
char *arg = pstrdup(optarg);
canonicalize_path(arg);
simple_string_list_append(&context.ignore_list, arg);
break;
}
case 'm':
manifest_path = pstrdup(optarg);
canonicalize_path(manifest_path);
break;
case 'F':
if (strcmp(optarg, "p") == 0 || strcmp(optarg, "plain") == 0)
context.format = 'p';
else if (strcmp(optarg, "t") == 0 || strcmp(optarg, "tar") == 0)
context.format = 't';
else
pg_fatal("invalid backup format \"%s\", must be \"plain\" or \"tar\"",
optarg);
break;
case 'n':
no_parse_wal = true;
break;
case 'P':
show_progress = true;
break;
case 'q':
quiet = true;
break;
case 's':
context.skip_checksums = true;
break;
case 'w':
wal_directory = pstrdup(optarg);
canonicalize_path(wal_directory);
break;
default:
/* getopt_long already emitted a complaint */
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
}
/* Get backup directory name */
if (optind >= argc)
{
pg_log_error("no backup directory specified");
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
context.backup_directory = pstrdup(argv[optind++]);
canonicalize_path(context.backup_directory);
/* Complain if any arguments remain */
if (optind < argc)
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
/* Complain if the specified arguments conflict */
if (show_progress && quiet)
pg_fatal("cannot specify both %s and %s",
"-P/--progress", "-q/--quiet");
/* Unless --no-parse-wal was specified, we will need pg_waldump. */
if (!no_parse_wal)
{
int ret;
pg_waldump_path = pg_malloc(MAXPGPATH);
ret = find_other_exec(argv[0], "pg_waldump",
"pg_waldump (PostgreSQL) " PG_VERSION "\n",
pg_waldump_path);
if (ret < 0)
{
char full_path[MAXPGPATH];
if (find_my_exec(argv[0], full_path) < 0)
strlcpy(full_path, progname, sizeof(full_path));
if (ret == -1)
pg_fatal("program \"%s\" is needed by %s but was not found in the same directory as \"%s\"",
"pg_waldump", "pg_verifybackup", full_path);
else
pg_fatal("program \"%s\" was found by \"%s\" but was not the same version as %s",
"pg_waldump", full_path, "pg_verifybackup");
}
}
/* By default, look for the manifest in the backup directory. */
if (manifest_path == NULL)
manifest_path = psprintf("%s/backup_manifest",
context.backup_directory);
/* By default, look for the WAL in the backup directory, too. */
if (wal_directory == NULL)
wal_directory = psprintf("%s/pg_wal", context.backup_directory);
/*
* Try to read the manifest. We treat any errors encountered while parsing
* the manifest as fatal; there doesn't seem to be much point in trying to
* verify the backup directory against a corrupted manifest.
*/
context.manifest = parse_manifest_file(manifest_path);
/*
* If the backup directory cannot be found, treat this as a fatal error.
*/
dir = opendir(context.backup_directory);
if (dir == NULL)
report_fatal_error("could not open directory \"%s\": %m",
context.backup_directory);
/*
* At this point, we know that the backup directory exists, so it's now
* reasonable to check for files immediately inside it. Thus, before going
* further, if the user did not specify the backup format, check for
* PG_VERSION to distinguish between tar and plain format.
*/
if (context.format == '\0')
{
struct stat sb;
char *path;
path = psprintf("%s/%s", context.backup_directory, "PG_VERSION");
if (stat(path, &sb) == 0)
context.format = 'p';
else if (errno != ENOENT)
{
pg_log_error("could not stat file \"%s\": %m", path);
exit(1);
}
else
{
/* No PG_VERSION, so assume tar format. */
context.format = 't';
}
pfree(path);
}
/*
* XXX: In the future, we should consider enhancing pg_waldump to read WAL
* files from an archive.
*/
if (!no_parse_wal && context.format == 't')
{
pg_log_error("pg_waldump cannot read tar files");
pg_log_error_hint("You must use -n/--no-parse-wal when verifying a tar-format backup.");
exit(1);
}
/*
* Perform the appropriate type of verification appropriate based on the
* backup format. This will close 'dir'.
*/
if (context.format == 'p')
verify_plain_backup_directory(&context, NULL, context.backup_directory,
dir);
else
verify_tar_backup(&context, dir);
/*
* The "matched" flag should now be set on every entry in the hash table.
* Any entries for which the bit is not set are files mentioned in the
* manifest that don't exist on disk (or in the relevant tar files).
*/
report_extra_backup_files(&context);
/*
* If this is a tar-format backup, checksums were already verified above;
* but if it's a plain-format backup, we postpone it until this point,
* since the earlier checks can be performed just by knowing which files
* are present, without needing to read all of them.
*/
if (context.format == 'p' && !context.skip_checksums)
verify_backup_checksums(&context);
/*
* Try to parse the required ranges of WAL records, unless we were told
* not to do so.
*/
if (!no_parse_wal)
parse_required_wal(&context, pg_waldump_path, wal_directory);
/*
* If everything looks OK, tell the user this, unless we were asked to
* work quietly.
*/
if (!context.saw_any_error && !quiet)
printf(_("backup successfully verified\n"));
return context.saw_any_error ? 1 : 0;
}
/*
* Parse a manifest file and return a data structure describing the contents.
*/
static manifest_data *
parse_manifest_file(char *manifest_path)
{
int fd;
struct stat statbuf;
off_t estimate;
uint32 initial_size;
manifest_files_hash *ht;
char *buffer;
int rc;
JsonManifestParseContext context;
manifest_data *result;
int chunk_size = READ_CHUNK_SIZE;
/* Open the manifest file. */
if ((fd = open(manifest_path, O_RDONLY | PG_BINARY, 0)) < 0)
report_fatal_error("could not open file \"%s\": %m", manifest_path);
/* Figure out how big the manifest is. */
if (fstat(fd, &statbuf) != 0)
report_fatal_error("could not stat file \"%s\": %m", manifest_path);
/* Guess how large to make the hash table based on the manifest size. */
estimate = statbuf.st_size / ESTIMATED_BYTES_PER_MANIFEST_LINE;
initial_size = Min(PG_UINT32_MAX, Max(estimate, 256));
/* Create the hash table. */
ht = manifest_files_create(initial_size, NULL);
result = pg_malloc0(sizeof(manifest_data));
result->files = ht;
context.private_data = result;
context.version_cb = verifybackup_version_cb;
context.system_identifier_cb = verifybackup_system_identifier;
context.per_file_cb = verifybackup_per_file_cb;
context.per_wal_range_cb = verifybackup_per_wal_range_cb;
context.error_cb = report_manifest_error;
/*
* Parse the file, in chunks if necessary.
*/
if (statbuf.st_size <= chunk_size)
{
buffer = pg_malloc(statbuf.st_size);
rc = read(fd, buffer, statbuf.st_size);
if (rc != statbuf.st_size)
{
if (rc < 0)
pg_fatal("could not read file \"%s\": %m", manifest_path);
else
pg_fatal("could not read file \"%s\": read %d of %lld",
manifest_path, rc, (long long int) statbuf.st_size);
}
/* Close the manifest file. */
close(fd);
/* Parse the manifest. */
json_parse_manifest(&context, buffer, statbuf.st_size);
}
else
{
int bytes_left = statbuf.st_size;
JsonManifestParseIncrementalState *inc_state;
inc_state = json_parse_manifest_incremental_init(&context);
buffer = pg_malloc(chunk_size + 1);
while (bytes_left > 0)
{
int bytes_to_read = chunk_size;
/*
 * Make sure that the last chunk is sufficiently large (i.e. at
 * least half the chunk size) so that it fully contains the final
 * piece of the manifest, which carries the checksum.
*/
if (bytes_left < chunk_size)
bytes_to_read = bytes_left;
else if (bytes_left < 2 * chunk_size)
bytes_to_read = bytes_left / 2;
rc = read(fd, buffer, bytes_to_read);
if (rc != bytes_to_read)
{
if (rc < 0)
pg_fatal("could not read file \"%s\": %m", manifest_path);
else
pg_fatal("could not read file \"%s\": read %lld of %lld",
manifest_path,
(long long int) (statbuf.st_size + rc - bytes_left),
(long long int) statbuf.st_size);
}
bytes_left -= rc;
json_parse_manifest_incremental_chunk(inc_state, buffer, rc,
bytes_left == 0);
}
/* Release the incremental state memory */
json_parse_manifest_incremental_shutdown(inc_state);
close(fd);
}
/* Done with the buffer. */
pfree(buffer);
return result;
}
/*
* Report an error while parsing the manifest.
*
* We consider all such errors to be fatal errors. The manifest parser
* expects this function not to return.
*/
static void
report_manifest_error(JsonManifestParseContext *context, const char *fmt,...)
{
va_list ap;
va_start(ap, fmt);
pg_log_generic_v(PG_LOG_ERROR, PG_LOG_PRIMARY, gettext(fmt), ap);
va_end(ap);
exit(1);
}
/*
* Record details extracted from the backup manifest.
*/
static void
verifybackup_version_cb(JsonManifestParseContext *context,
int manifest_version)
{
manifest_data *manifest = context->private_data;
/* Validation will be at the later stage */
manifest->version = manifest_version;
}
/*
* Record details extracted from the backup manifest.
*/
static void
verifybackup_system_identifier(JsonManifestParseContext *context,
uint64 manifest_system_identifier)
{
manifest_data *manifest = context->private_data;
/* Validation will be at the later stage */
manifest->system_identifier = manifest_system_identifier;
}
/*
* Record details extracted from the backup manifest for one file.
*/
static void
verifybackup_per_file_cb(JsonManifestParseContext *context,
const char *pathname, uint64 size,
pg_checksum_type checksum_type,
int checksum_length, uint8 *checksum_payload)
{
manifest_data *manifest = context->private_data;
manifest_files_hash *ht = manifest->files;
manifest_file *m;
bool found;
/* Make a new entry in the hash table for this file. */
m = manifest_files_insert(ht, pathname, &found);
if (found)
report_fatal_error("duplicate path name in backup manifest: \"%s\"",
pathname);
/* Initialize the entry. */
m->size = size;
m->checksum_type = checksum_type;
m->checksum_length = checksum_length;
m->checksum_payload = checksum_payload;
m->matched = false;
m->bad = false;
}
/*
* Record details extracted from the backup manifest for one WAL range.
*/
static void
verifybackup_per_wal_range_cb(JsonManifestParseContext *context,
TimeLineID tli,
XLogRecPtr start_lsn, XLogRecPtr end_lsn)
{
manifest_data *manifest = context->private_data;
manifest_wal_range *range;
/* Allocate and initialize a struct describing this WAL range. */
range = palloc_object(manifest_wal_range);
range->tli = tli;
range->start_lsn = start_lsn;
range->end_lsn = end_lsn;
range->prev = manifest->last_wal_range;
range->next = NULL;
/* Add it to the end of the list. */
if (manifest->first_wal_range == NULL)
manifest->first_wal_range = range;
else
manifest->last_wal_range->next = range;
manifest->last_wal_range = range;
}
/*
* Verify one directory of a plain-format backup.
*
* 'relpath' is NULL if we are to verify the top-level backup directory,
* and otherwise the relative path to the directory that is to be verified.
*
* 'fullpath' is the backup directory with 'relpath' appended; i.e. the actual
* filesystem path at which it can be found.
*
* 'dir' is an open directory handle, or NULL if the caller wants us to
* open it. If the caller chooses to pass a handle, we'll close it when
* we're done with it.
*/
static void
verify_plain_backup_directory(verifier_context *context, char *relpath,
char *fullpath, DIR *dir)
{
struct dirent *dirent;
/* Open the directory unless the caller did it. */
if (dir == NULL && ((dir = opendir(fullpath)) == NULL))
{
report_backup_error(context,
"could not open directory \"%s\": %m", fullpath);
simple_string_list_append(&context->ignore_list, relpath);
return;
}
while (errno = 0, (dirent = readdir(dir)) != NULL)
{
char *filename = dirent->d_name;
char *newfullpath = psprintf("%s/%s", fullpath, filename);
char *newrelpath;
/* Skip "." and ".." */
if (filename[0] == '.' && (filename[1] == '\0'
|| strcmp(filename, "..") == 0))
continue;
if (relpath == NULL)
newrelpath = pstrdup(filename);
else
newrelpath = psprintf("%s/%s", relpath, filename);
if (!should_ignore_relpath(context, newrelpath))
verify_plain_backup_file(context, newrelpath, newfullpath);
pfree(newfullpath);
pfree(newrelpath);
}
if (closedir(dir))
{
report_backup_error(context,
"could not close directory \"%s\": %m", fullpath);
return;
}
}
/*
* Verify one file (which might actually be a directory or a symlink).
*
* The arguments to this function have the same meaning as the similarly named
* arguments to verify_plain_backup_directory.
*/
static void
verify_plain_backup_file(verifier_context *context, char *relpath,
char *fullpath)
{
struct stat sb;
manifest_file *m;
if (stat(fullpath, &sb) != 0)
{
report_backup_error(context,
"could not stat file or directory \"%s\": %m",
relpath);
/*
* Suppress further errors related to this path name and, if it's a
* directory, anything underneath it.
*/
simple_string_list_append(&context->ignore_list, relpath);
return;
}
/* If it's a directory, just recurse. */
if (S_ISDIR(sb.st_mode))
{
verify_plain_backup_directory(context, relpath, fullpath, NULL);
return;
}
/* If it's not a directory, it should be a regular file. */
if (!S_ISREG(sb.st_mode))
{
report_backup_error(context,
"\"%s\" is not a regular file or directory",
relpath);
return;
}
/* Check whether there's an entry in the manifest hash. */
m = manifest_files_lookup(context->manifest->files, relpath);
if (m == NULL)
{
report_backup_error(context,
"\"%s\" is present on disk but not in the manifest",
relpath);
return;
}
/* Flag this entry as having been encountered in the filesystem. */
m->matched = true;
/* Check that the size matches. */
if (m->size != sb.st_size)
{
report_backup_error(context,
"\"%s\" has size %llu on disk but size %llu in the manifest",
relpath, (unsigned long long) sb.st_size,
(unsigned long long) m->size);
m->bad = true;
}
/*
* Validate the manifest system identifier, not available in manifest
* version 1.
*/
if (context->manifest->version != 1 &&
strcmp(relpath, XLOG_CONTROL_FILE) == 0)
verify_control_file(fullpath, context->manifest->system_identifier);
/* Update statistics for progress report, if necessary */
if (show_progress && !context->skip_checksums &&
should_verify_checksum(m))
total_size += m->size;
/*
* We don't verify checksums at this stage. We first finish verifying that
* we have the expected set of files with the expected sizes, and only
* afterwards verify the checksums. That's because computing checksums may
* take a while, and we'd like to report more obvious problems quickly.
*/
}
/*
* Sanity check control file and validate system identifier against manifest
* system identifier.
*/
static void
verify_control_file(const char *controlpath, uint64 manifest_system_identifier)
{
ControlFileData *control_file;
bool crc_ok;
pg_log_debug("reading \"%s\"", controlpath);
control_file = get_controlfile_by_exact_path(controlpath, &crc_ok);
/* Control file contents not meaningful if CRC is bad. */
if (!crc_ok)
report_fatal_error("%s: CRC is incorrect", controlpath);
/* Can't interpret control file if not current version. */
if (control_file->pg_control_version != PG_CONTROL_VERSION)
report_fatal_error("%s: unexpected control file version",
controlpath);
/* System identifiers should match. */
if (manifest_system_identifier != control_file->system_identifier)
report_fatal_error("%s: manifest system identifier is %" PRIu64 ", but control file has %" PRIu64,
controlpath,
manifest_system_identifier,
control_file->system_identifier);
/* Release memory. */
pfree(control_file);
}
/*
* Verify tar backup.
*
* The caller should pass a handle to the target directory, which we will
* close when we're done with it.
*/
static void
verify_tar_backup(verifier_context *context, DIR *dir)
{
struct dirent *dirent;
SimplePtrList tarfiles = {NULL, NULL};
SimplePtrListCell *cell;
Assert(context->format != 'p');
progress_report(false);
/* First pass: scan the directory for tar files. */
while (errno = 0, (dirent = readdir(dir)) != NULL)
{
char *filename = dirent->d_name;
/* Skip "." and ".." */
if (filename[0] == '.' && (filename[1] == '\0'
|| strcmp(filename, "..") == 0))
continue;
/*
* Unless it's something we should ignore, perform prechecks and add
* it to the list.
*/
if (!should_ignore_relpath(context, filename))
{
char *fullpath;
fullpath = psprintf("%s/%s", context->backup_directory, filename);
precheck_tar_backup_file(context, filename, fullpath, &tarfiles);
pfree(fullpath);
}
}
if (closedir(dir))
{
report_backup_error(context,
"could not close directory \"%s\": %m",
context->backup_directory);
return;
}
/* Second pass: Perform the final verification of the tar contents. */
for (cell = tarfiles.head; cell != NULL; cell = cell->next)
{
tar_file *tar = (tar_file *) cell->ptr;
astreamer *streamer;
char *fullpath;
/*
* Prepares the archive streamer stack according to the tar
* compression format.
*/
streamer = create_archive_verifier(context,
tar->relpath,
tar->tblspc_oid,
tar->compress_algorithm);
/* Compute the full pathname to the target file. */
fullpath = psprintf("%s/%s", context->backup_directory,
tar->relpath);
/* Invoke the streamer for reading, decompressing, and verifying. */
verify_tar_file(context, tar->relpath, fullpath, streamer);
/* Cleanup. */
pfree(tar->relpath);
pfree(tar);
pfree(fullpath);
astreamer_finalize(streamer);
astreamer_free(streamer);
}
simple_ptr_list_destroy(&tarfiles);
progress_report(true);
}
/*
* Preparatory steps for verifying files in tar format backups.
*
* Carries out basic validation of the tar format backup file, detects the
* compression type, and appends that information to the tarfiles list. An
* error will be reported if the tar file is inaccessible, or if the file type,
* name, or compression type is not as expected.
*
 * The arguments to this function are mostly the same as those of
 * verify_plain_backup_file. The additional argument outputs a list of valid
* tar files.
*/
static void
precheck_tar_backup_file(verifier_context *context, char *relpath,
char *fullpath, SimplePtrList *tarfiles)
{
struct stat sb;
Oid tblspc_oid = InvalidOid;
pg_compress_algorithm compress_algorithm;
tar_file *tar;
char *suffix = NULL;
/* Should be tar format backup */
Assert(context->format == 't');
/* Get file information */
if (stat(fullpath, &sb) != 0)
{
report_backup_error(context,
"could not stat file or directory \"%s\": %m",
relpath);
return;
}
/* In a tar format backup, we expect only regular files. */
if (!S_ISREG(sb.st_mode))
{
report_backup_error(context,
"file \"%s\" is not a regular file",
relpath);
return;
}
/*
* We expect tar files for backing up the main directory, tablespace, and
* pg_wal directory.
*
* pg_basebackup writes the main data directory to an archive file named
* base.tar, the pg_wal directory to pg_wal.tar, and the tablespace
* directory to <tablespaceoid>.tar, each followed by a compression type
* extension such as .gz, .lz4, or .zst.
*/
if (strncmp("base", relpath, 4) == 0)
suffix = relpath + 4;
else if (strncmp("pg_wal", relpath, 6) == 0)
suffix = relpath + 6;
else
{
/* Expected a <tablespaceoid>.tar file here. */
uint64 num = strtoul(relpath, &suffix, 10);
/*
* Report an error if we didn't consume at least one character, if the
* result is 0, or if the value is too large to be a valid OID.
*/
if (suffix == NULL || num <= 0 || num > OID_MAX)
{
report_backup_error(context,
"file \"%s\" is not expected in a tar format backup",
relpath);
return;
}
tblspc_oid = (Oid) num;
}
/* Now, check the compression type of the tar */
if (strcmp(suffix, ".tar") == 0)
compress_algorithm = PG_COMPRESSION_NONE;
else if (strcmp(suffix, ".tgz") == 0)
compress_algorithm = PG_COMPRESSION_GZIP;
else if (strcmp(suffix, ".tar.gz") == 0)
compress_algorithm = PG_COMPRESSION_GZIP;
else if (strcmp(suffix, ".tar.lz4") == 0)
compress_algorithm = PG_COMPRESSION_LZ4;
else if (strcmp(suffix, ".tar.zst") == 0)
compress_algorithm = PG_COMPRESSION_ZSTD;
else
{
report_backup_error(context,
"file \"%s\" is not expected in a tar format backup",
relpath);
return;
}
/*
* Ignore WALs, as reading and verification will be handled through
* pg_waldump.
*/
if (strncmp("pg_wal", relpath, 6) == 0)
return;
/*
* Append the information to the list for complete verification at a later
* stage.
*/
tar = pg_malloc(sizeof(tar_file));
tar->relpath = pstrdup(relpath);
tar->tblspc_oid = tblspc_oid;
tar->compress_algorithm = compress_algorithm;
simple_ptr_list_append(tarfiles, tar);
/* Update statistics for progress report, if necessary */
if (show_progress)
total_size += sb.st_size;
}
/*
* Verification of a single tar file content.
*
* It reads a given tar archive in predefined chunks and passes it to the
* streamer, which initiates routines for decompression (if necessary) and then
* verifies each member within the tar file.
*/
static void
verify_tar_file(verifier_context *context, char *relpath, char *fullpath,
astreamer *streamer)
{
int fd;
int rc;
char *buffer;
pg_log_debug("reading \"%s\"", fullpath);
/* Open the target file. */
if ((fd = open(fullpath, O_RDONLY | PG_BINARY, 0)) < 0)
{
report_backup_error(context, "could not open file \"%s\": %m",
relpath);
return;
}
buffer = pg_malloc(READ_CHUNK_SIZE * sizeof(uint8));
/* Perform the reads */
while ((rc = read(fd, buffer, READ_CHUNK_SIZE)) > 0)
{
astreamer_content(streamer, NULL, buffer, rc, ASTREAMER_UNKNOWN);
/* Report progress */
done_size += rc;
progress_report(false);
}
pg_free(buffer);
if (rc < 0)
report_backup_error(context, "could not read file \"%s\": %m",
relpath);
/* Close the file. */
if (close(fd) != 0)
report_backup_error(context, "could not close file \"%s\": %m",
relpath);
}
/*
* Scan the hash table for entries where the 'matched' flag is not set; report
* that such files are present in the manifest but not on disk.
*/
static void
report_extra_backup_files(verifier_context *context)
{
manifest_data *manifest = context->manifest;
manifest_files_iterator it;
manifest_file *m;
manifest_files_start_iterate(manifest->files, &it);
while ((m = manifest_files_iterate(manifest->files, &it)) != NULL)
if (!m->matched && !should_ignore_relpath(context, m->pathname))
report_backup_error(context,
"\"%s\" is present in the manifest but not on disk",
m->pathname);
}
/*
* Verify checksums for hash table entries that are otherwise unproblematic.
* If we've already reported some problem related to a hash table entry, or
* if it has no checksum, just skip it.
*/
static void
verify_backup_checksums(verifier_context *context)
{
manifest_data *manifest = context->manifest;
manifest_files_iterator it;
manifest_file *m;
uint8 *buffer;
progress_report(false);
buffer = pg_malloc(READ_CHUNK_SIZE * sizeof(uint8));
manifest_files_start_iterate(manifest->files, &it);
while ((m = manifest_files_iterate(manifest->files, &it)) != NULL)
{
if (should_verify_checksum(m) &&
!should_ignore_relpath(context, m->pathname))
{
char *fullpath;
/* Compute the full pathname to the target file. */
fullpath = psprintf("%s/%s", context->backup_directory,
m->pathname);
/* Do the actual checksum verification. */
verify_file_checksum(context, m, fullpath, buffer);
/* Avoid leaking memory. */
pfree(fullpath);
}
}
pfree(buffer);
progress_report(true);
}
/*
* Verify the checksum of a single file.
*/
static void
verify_file_checksum(verifier_context *context, manifest_file *m,
char *fullpath, uint8 *buffer)
{
pg_checksum_context checksum_ctx;
const char *relpath = m->pathname;
int fd;
int rc;
uint64 bytes_read = 0;
uint8 checksumbuf[PG_CHECKSUM_MAX_LENGTH];
int checksumlen;
/* Open the target file. */
if ((fd = open(fullpath, O_RDONLY | PG_BINARY, 0)) < 0)
{
report_backup_error(context, "could not open file \"%s\": %m",
relpath);
return;
}
/* Initialize checksum context. */
if (pg_checksum_init(&checksum_ctx, m->checksum_type) < 0)
{
report_backup_error(context, "could not initialize checksum of file \"%s\"",
relpath);
close(fd);
return;
}
/* Read the file chunk by chunk, updating the checksum as we go. */
while ((rc = read(fd, buffer, READ_CHUNK_SIZE)) > 0)
{
bytes_read += rc;
if (pg_checksum_update(&checksum_ctx, buffer, rc) < 0)
{
report_backup_error(context, "could not update checksum of file \"%s\"",
relpath);
close(fd);
return;
}
/* Report progress */
done_size += rc;
progress_report(false);
}
if (rc < 0)
report_backup_error(context, "could not read file \"%s\": %m",
relpath);
/* Close the file. */
if (close(fd) != 0)
{
report_backup_error(context, "could not close file \"%s\": %m",
relpath);
return;
}
/* If we didn't manage to read the whole file, bail out now. */
if (rc < 0)
return;
/*
* Double-check that we read the expected number of bytes from the file.
* Normally, mismatches would be caught in verify_plain_backup_file and
* this check would never be reached, but this provides additional safety
* and clarity in the event of concurrent modifications or filesystem
* misbehavior.
*/
if (bytes_read != m->size)
{
report_backup_error(context,
"file \"%s\" should contain %" PRIu64 " bytes, but read %" PRIu64,
relpath, m->size, bytes_read);
return;
}
/* Get the final checksum. */
checksumlen = pg_checksum_final(&checksum_ctx, checksumbuf);
if (checksumlen < 0)
{
report_backup_error(context,
"could not finalize checksum of file \"%s\"",
relpath);
return;
}
/* And check it against the manifest. */
if (checksumlen != m->checksum_length)
report_backup_error(context,
"file \"%s\" has checksum of length %d, but expected %d",
relpath, m->checksum_length, checksumlen);
else if (memcmp(checksumbuf, m->checksum_payload, checksumlen) != 0)
report_backup_error(context,
"checksum mismatch for file \"%s\"",
relpath);
}
/*
* Attempt to parse the WAL files required to restore from backup using
* pg_waldump.
*/
static void
parse_required_wal(verifier_context *context, char *pg_waldump_path,
char *wal_directory)
{
manifest_data *manifest = context->manifest;
manifest_wal_range *this_wal_range = manifest->first_wal_range;
while (this_wal_range != NULL)
{
char *pg_waldump_cmd;
pg_waldump_cmd = psprintf("\"%s\" --quiet --path=\"%s\" --timeline=%u --start=%X/%08X --end=%X/%08X\n",
pg_waldump_path, wal_directory, this_wal_range->tli,
LSN_FORMAT_ARGS(this_wal_range->start_lsn),
LSN_FORMAT_ARGS(this_wal_range->end_lsn));
fflush(NULL);
if (system(pg_waldump_cmd) != 0)
report_backup_error(context,
"WAL parsing failed for timeline %u",
this_wal_range->tli);
this_wal_range = this_wal_range->next;
}
}
/*
* Report a problem with the backup.
*
* Update the context to indicate that we saw an error, and exit if the
* context says we should.
*/
void
report_backup_error(verifier_context *context, const char *pg_restrict fmt,...)
{
va_list ap;
va_start(ap, fmt);
pg_log_generic_v(PG_LOG_ERROR, PG_LOG_PRIMARY, gettext(fmt), ap);
va_end(ap);
context->saw_any_error = true;
if (context->exit_on_error)
exit(1);
}
/*
* Report a fatal error and exit
*/
void
report_fatal_error(const char *pg_restrict fmt,...)
{
va_list ap;
va_start(ap, fmt);
pg_log_generic_v(PG_LOG_ERROR, PG_LOG_PRIMARY, gettext(fmt), ap);
va_end(ap);
exit(1);
}
/*
* Is the specified relative path, or some prefix of it, listed in the set
* of paths to ignore?
*
* Note that by "prefix" we mean a parent directory; for this purpose,
* "aa/bb" is not a prefix of "aa/bbb", but it is a prefix of "aa/bb/cc".
*/
bool
should_ignore_relpath(verifier_context *context, const char *relpath)
{
SimpleStringListCell *cell;
for (cell = context->ignore_list.head; cell != NULL; cell = cell->next)
{
const char *r = relpath;
char *v = cell->val;
while (*v != '\0' && *r == *v)
++r, ++v;
if (*v == '\0' && (*r == '\0' || *r == '/'))
return true;
}
return false;
}
/*
* Create a chain of archive streamers appropriate for verifying a given
* archive.
*/
static astreamer *
create_archive_verifier(verifier_context *context, char *archive_name,
Oid tblspc_oid, pg_compress_algorithm compress_algo)
{
astreamer *streamer = NULL;
/* Should be here only for tar backup */
Assert(context->format == 't');
/* Last step is the actual verification. */
streamer = astreamer_verify_content_new(streamer, context, archive_name,
tblspc_oid);
/* Before that we must parse the tar file. */
streamer = astreamer_tar_parser_new(streamer);
/* Before that we must decompress, if archive is compressed. */
if (compress_algo == PG_COMPRESSION_GZIP)
streamer = astreamer_gzip_decompressor_new(streamer);
else if (compress_algo == PG_COMPRESSION_LZ4)
streamer = astreamer_lz4_decompressor_new(streamer);
else if (compress_algo == PG_COMPRESSION_ZSTD)
streamer = astreamer_zstd_decompressor_new(streamer);
return streamer;
}
/*
* Print a progress report based on the global variables.
*
* Progress report is written at maximum once per second, unless the finished
* parameter is set to true.
*
* If finished is set to true, this is the last progress report. The cursor
* is moved to the next line.
*/
static void
progress_report(bool finished)
{
static pg_time_t last_progress_report = 0;
pg_time_t now;
int percent_size = 0;
char totalsize_str[32];
char donesize_str[32];
if (!show_progress)
return;
now = time(NULL);
if (now == last_progress_report && !finished)
return; /* Max once per second */
last_progress_report = now;
percent_size = total_size ? (int) ((done_size * 100 / total_size)) : 0;
snprintf(totalsize_str, sizeof(totalsize_str), UINT64_FORMAT,
total_size / 1024);
snprintf(donesize_str, sizeof(donesize_str), UINT64_FORMAT,
done_size / 1024);
fprintf(stderr,
_("%*s/%s kB (%d%%) verified"),
(int) strlen(totalsize_str),
donesize_str, totalsize_str, percent_size);
/*
* Stay on the same line if reporting to a terminal and we're not done
* yet.
*/
fputc((!finished && isatty(fileno(stderr))) ? '\r' : '\n', stderr);
}
/*
* Print out usage information and exit.
*/
static void
usage(void)
{
printf(_("%s verifies a backup against the backup manifest.\n\n"), progname);
printf(_("Usage:\n %s [OPTION]... BACKUPDIR\n\n"), progname);
printf(_("Options:\n"));
printf(_(" -e, --exit-on-error exit immediately on error\n"));
printf(_(" -F, --format=p|t backup format (plain, tar)\n"));
printf(_(" -i, --ignore=RELATIVE_PATH ignore indicated path\n"));
printf(_(" -m, --manifest-path=PATH use specified path for manifest\n"));
printf(_(" -n, --no-parse-wal do not try to parse WAL files\n"));
printf(_(" -P, --progress show progress information\n"));
printf(_(" -q, --quiet do not print any output, except for errors\n"));
printf(_(" -s, --skip-checksums skip checksum verification\n"));
printf(_(" -w, --wal-directory=PATH use specified path for WAL files\n"));
printf(_(" -V, --version output version information, then exit\n"));
printf(_(" -?, --help show this help, then exit\n"));
printf(_("\nReport bugs to <%s>.\n"), PACKAGE_BUGREPORT);
printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
}
|
c
|
github
|
https://github.com/postgres/postgres
|
src/bin/pg_verifybackup/pg_verifybackup.c
|
import ConfigParser
from zope.interface import implements
#from repoze.who.interfaces import IChallenger, IIdentifier, IAuthenticator
from repoze.who.interfaces import IMetadataProvider
class INIMetadataProvider(object):
implements(IMetadataProvider)
def __init__(self, ini_file, key_attribute):
self.users = ConfigParser.ConfigParser()
self.users.readfp(open(ini_file))
self.key_attribute = key_attribute
def add_metadata(self, _environ, identity):
#logger = environ.get('repoze.who.logger','')
key = identity.get('repoze.who.userid')
try:
if self.key_attribute:
for sec in self.users.sections():
if self.users.has_option(sec, self.key_attribute):
if key in self.users.get(sec, self.key_attribute):
identity["user"] = dict(self.users.items(sec))
break
else:
identity["user"] = dict(self.users.items(key))
except ValueError:
pass
def make_plugin(ini_file, key_attribute=""):
return INIMetadataProvider(ini_file, key_attribute)
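# Illustrative usage sketch (kept as comments; the file name and contents are
# assumptions, not part of the original module). With an empty key_attribute
# the section name itself must equal the repoze.who userid; with a
# key_attribute set, the first section whose attribute value contains the
# userid is used.
#
#   users.ini:
#       [alice]
#       mail = alice@example.org
#       displayName = Alice
#
#   plugin = make_plugin('users.ini')
#   identity = {'repoze.who.userid': 'alice'}
#   plugin.add_metadata({}, identity)
#   # identity['user'] now holds the items of the [alice] section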
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# This module uses OpenERP, Open Source Management Solution Framework.
# Copyright (C):
# 2012-Today Serpent Consulting Services (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import orm, fields
from openerp.tools.translate import _
class MassObject(orm.Model):
_name = "mass.object"
_columns = {
'name': fields.char("Name", size=64, required=True, select=1),
'model_id': fields.many2one(
'ir.model', 'Model', required=True, select=1),
'field_ids': fields.many2many(
'ir.model.fields', 'mass_field_rel', 'mass_id', 'field_id',
'Fields'),
'ref_ir_act_window': fields.many2one(
'ir.actions.act_window', 'Sidebar Action', readonly=True,
help="Sidebar action to make this template available on records \
of the related document model"),
'ref_ir_value': fields.many2one(
'ir.values', 'Sidebar Button', readonly=True,
help="Sidebar button to open the sidebar action"),
'model_ids': fields.many2many('ir.model', string='Model List')
}
_sql_constraints = [
('name_uniq', 'unique (name)', _('Name must be unique!')),
]
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
if context is None:
context = {}
if not model_id:
return {'value': {'model_ids': [(6, 0, [])]}}
model_ids = [model_id]
model_obj = self.pool['ir.model']
active_model_obj = self.pool.get(model_obj.browse(
cr, uid, model_id).model)
if active_model_obj._inherits:
for key, val in active_model_obj._inherits.items():
found_model_ids = model_obj.search(
cr, uid, [('model', '=', key)], context=context)
model_ids += found_model_ids
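        # (6, 0, ids) is the ORM many2many write command that replaces the
        # current set of linked records with exactly the given ids.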
return {'value': {'model_ids': [(6, 0, model_ids)]}}
def create_action(self, cr, uid, ids, context=None):
vals = {}
action_obj = self.pool['ir.actions.act_window']
ir_values_obj = self.pool['ir.values']
for data in self.browse(cr, uid, ids, context=context):
src_obj = data.model_id.model
button_name = _('Mass Editing (%s)') % data.name
vals['ref_ir_act_window'] = action_obj.create(
cr, SUPERUSER_ID,
{
'name': button_name,
'type': 'ir.actions.act_window',
'res_model': 'mass.editing.wizard',
'src_model': src_obj,
'view_type': 'form',
'context': "{'mass_editing_object' : %d}" % (data.id),
'view_mode': 'form,tree',
'target': 'new',
'auto_refresh': 1,
},
context)
vals['ref_ir_value'] = ir_values_obj.create(
cr, SUPERUSER_ID,
{
'name': button_name,
'model': src_obj,
'key2': 'client_action_multi',
'value': (
"ir.actions.act_window," +
str(vals['ref_ir_act_window'])),
'object': True,
},
context)
self.write(
cr, uid, ids,
{
'ref_ir_act_window': vals.get('ref_ir_act_window', False),
'ref_ir_value': vals.get('ref_ir_value', False),
},
context)
return True
def unlink_action(self, cr, uid, ids, context=None):
for template in self.browse(cr, uid, ids, context=context):
try:
if template.ref_ir_act_window:
act_window_obj = self.pool['ir.actions.act_window']
act_window_obj.unlink(
cr, SUPERUSER_ID, [template.ref_ir_act_window.id],
context=context)
if template.ref_ir_value:
ir_values_obj = self.pool['ir.values']
ir_values_obj.unlink(
cr, SUPERUSER_ID, template.ref_ir_value.id,
context=context)
except:
raise orm.except_orm(
_("Warning"),
_("Deletion of the action record failed."))
return True
def unlink(self, cr, uid, ids, context=None):
self.unlink_action(cr, uid, ids, context=context)
return super(MassObject, self).unlink(cr, uid, ids, context=context)
def copy(self, cr, uid, record_id, default=None, context=None):
if default is None:
default = {}
default.update({'name': '', 'field_ids': []})
return super(MassObject, self).copy(
cr, uid, record_id, default, context=context)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.utils.rest import assert_response
from cfme.utils.rest import delete_resources_from_collection
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.long_running,
pytest.mark.meta(server_roles=['+embedded_ansible']),
pytest.mark.ignore_stream('upstream'),
test_requirements.rest,
]
@pytest.fixture(scope='module')
def ansible(appliance):
appliance.wait_for_embedded_ansible()
provider, __ = wait_for(
lambda: appliance.rest_api.collections.providers.find_by(
name='Embedded Ansible Automation Manager') or False,
num_sec=200,
delay=5
)
return provider[0]
@pytest.fixture(scope='function')
def repository(appliance, ansible):
collection = appliance.rest_api.collections.configuration_script_sources
uniq = fauxfactory.gen_alphanumeric(5)
repo_name = "test_repo_{}".format(uniq)
data = {
"name": repo_name,
"description": "Test Repo {}".format(uniq),
"manager_resource": {"href": ansible.href},
"related": {},
"scm_type": "git",
"scm_url": "https://github.com/quarckster/ansible_playbooks",
"scm_branch": "",
"scm_clean": False,
"scm_delete_on_update": False,
"scm_update_on_launch": False
}
collection.action.create(data)
assert_response(appliance)
repo_rest, __ = wait_for(
lambda: collection.find_by(name=repo_name) or False, num_sec=300, delay=5)
repo_rest = repo_rest[0]
yield repo_rest
if repo_rest.exists:
repo_rest.action.delete()
class TestReposRESTAPI(object):
@pytest.mark.parametrize(
'from_collection', [False, True], ids=['from_detail', 'from_collection'])
def test_edit_repository(self, appliance, repository, from_collection):
"""Tests editing repositories using REST API.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Ansible
caseimportance: medium
initialEstimate: 1/4h
"""
new_description = "Test Repository {}".format(fauxfactory.gen_alphanumeric(5))
if from_collection:
repository.reload()
repository_data_edited = {
"href": repository.href,
"description": new_description,
}
appliance.rest_api.collections.configuration_script_sources.action.edit(
repository_data_edited)
else:
repository.action.edit(description=new_description)
assert_response(appliance)
record, __ = wait_for(
lambda: appliance.rest_api.collections.configuration_script_sources.find_by(
description=new_description) or False,
num_sec=180,
delay=10,
)
repository.reload()
assert repository.description == record[0].description
@pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])
def test_delete_repository_from_detail(self, appliance, repository, method):
"""Deletes repository from detail using REST API
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Ansible
caseimportance: medium
initialEstimate: 1/4h
Bugzilla:
1477520
"""
del_action = getattr(repository.action.delete, method.upper())
del_action()
assert_response(appliance)
repository.wait_not_exists(num_sec=300, delay=5)
with pytest.raises(Exception, match='ActiveRecord::RecordNotFound'):
del_action()
assert_response(appliance, http_status=404)
def test_delete_repository_from_collection(self, appliance, repository):
"""Deletes repository from collection using REST API
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Ansible
caseimportance: medium
initialEstimate: 1/4h
"""
delete_resources_from_collection([repository], not_found=False, num_sec=300, delay=5)
class TestPayloadsRESTAPI(object):
def test_payloads_collection(self, appliance, repository):
"""Checks the configuration_script_payloads collection using REST API.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Ansible
caseimportance: medium
initialEstimate: 1/4h
"""
collection = appliance.rest_api.collections.configuration_script_payloads
collection.reload()
assert collection.all
for payload in collection.all:
assert 'AutomationManager::Playbook' in payload.type
def test_authentications_subcollection(self, appliance, repository):
"""Checks the authentications subcollection using REST API.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Ansible
caseimportance: medium
initialEstimate: 1/4h
"""
script_payloads = appliance.rest_api.collections.configuration_script_payloads
script_payloads.reload()
assert script_payloads[-1].authentications.name
def test_payloads_subcollection(self, appliance, repository):
"""Checks the configuration_script_payloads subcollection using REST API.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Ansible
caseimportance: medium
initialEstimate: 1/4h
"""
script_sources = appliance.rest_api.collections.configuration_script_sources
script_sources.reload()
assert script_sources[-1].configuration_script_payloads
| unknown | codeparrot/codeparrot-clean | | |
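The fixtures above all follow the same polling idiom: call wait_for with a lambda that returns the REST search result or False, so the lookup is retried every few seconds until it succeeds or times out. A minimal sketch of that idiom, assuming the same cfme wait_for helper; wait_for_named_resource and its collection_name argument are hypothetical and not part of the test module above.

from cfme.utils.wait import wait_for


def wait_for_named_resource(appliance, collection_name, name, num_sec=300, delay=5):
    # Poll the collection until find_by() returns a non-empty result;
    # returning False keeps wait_for retrying until num_sec elapses.
    collection = getattr(appliance.rest_api.collections, collection_name)
    records, __ = wait_for(
        lambda: collection.find_by(name=name) or False,
        num_sec=num_sec,
        delay=delay,
    )
    return records[0]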
'''
Created on Oct 8, 2013
Check tcp data to smtp port 25 on ppp0 interface:
tcpdump -A -vv -i ppp0 port 25
@author Vitor Borrego
'''
import whois
import time
import datetime
import plistlib
from os.path import expanduser
import smtplib
import syslog
import os
from time import strptime
import re
#from datetime import datetime
class Logger(object):
'''
Logs messages to syslog, usually /var/log/messages
'''
@staticmethod
def log(msg):
syslog.syslog("[domainChecker] %s" % (msg))
class SendMail(object):
'''
Sends an email message
'''
def __init__(self, config, message):
s = smtplib.SMTP()
header = 'From:%s\nTo:%s\nSubject:%s\n\n' % (config.getMailFrom(), config.getMailTo(), config.getSubject())
try:
(code, response) = s.connect(config.getServer() , config.getPort()) # expects 220
if code == 220:
(code, response) = s.ehlo('domainChecker') # expects 250
else:
raise Exception('Connect failed')
if config.getUseStartTLS()==True:
if code == 250:
(code, response) = s.starttls() # expects 220
else:
raise Exception('Ehlo failed')
if code == 220 or code==250: # 220 use starttls, 250 dont use starttls
(code, response) = s.login(config.getUser(), config.getPassword()) # expects 235
else:
raise Exception('StartTLS failed')
if code == 235:
Logger.log("Before sendmail:%s" % (message))
(code, response) = s.sendmail(config.getMailFrom(), config.getMailTo(), "%s%s" % (header, message))
else:
raise Exception('Login failed')
except Exception as e:
Logger.log("SendMail exception: %s" % (e))
s.quit()
class Domain(object):
'''
Stores info about domain expiration
'''
def __init__(self, domain, status, expirationDate, text):
self.exception = ''
try: # may give an error if no expiration date is found in whois registry
#print expirationDate
currTime = time.time()
self.domain = domain
self.status = status
self.text = text
self.expirationDate = expirationDate
self.expireDays = expirationDate - datetime.datetime.fromtimestamp(currTime)
except Exception as ex:
self.expireDays = 0
self.expirationDate = None
self.exception = str(ex)
def __repr__(self):
if len(self.exception) == 0:
return "Domain:%s\nExpiration date: %s\nExpires in: %s\n\n" % (self.domain, self.expirationDate, self.expireDays)
else:
return "Domain:%s\nExpiration date: %s\nText: %s\nException: %s\n\n" % (self.domain, self.expirationDate, self.text, self.exception)
class Config(object):
def __init__(self):
homeDir = expanduser("~") # get current user home dir
configFilePath = os.path.join(homeDir, '.domainChecker.plist')
config = plistlib.readPlist(configFilePath) # load domains to check
self.domains = config['domains']
self.server = config['mailSettings']['server']
self.port = config['mailSettings']['port']
self.user = config['mailSettings']['user']
self.password = config['mailSettings']['pass']
self.mailfrom = config['mailSettings']['from']
self.mailto = config['mailSettings']['to']
self.useStartTLS = config['mailSettings']['useStartTLS']
self.subject = config['mailSettings']['subject']
def getDomains(self):
return self.domains
def getServer(self):
return self.server
def getPort(self):
return self.port
def getUser(self):
return self.user
def getPassword(self):
return self.password
def getMailFrom(self):
return self.mailfrom
def getMailTo(self):
return self.mailto
def getUseStartTLS(self):
return self.useStartTLS
def getSubject(self):
return self.subject
@staticmethod
def createDummyConfig():
x = {}
x['domains'] = ['aaa', 'bbb', 'ccc']
x['mailSettings'] = {
'server':'mail.example.net',
'port':25,
'user':'test',
'pass':'test',
'from':'test@example.net',
'to':'tset@example.net',
'useStartTLS':False,
'subject':'Domain checker'
}
plistlib.writePlist(x, 'dummyConfig.plist')
class Checker(object):
def __init__(self):
config = Config()
domainStatus = []
for domain in config.getDomains():
whoisRes = whois.whois(domain)
# a list of expire dates may be created with multiple types in it (datetime.datetime or string)
expireDate = None
if type(whoisRes.expiration_date) is list:
for item in whoisRes.expiration_date:
if type(item) is datetime.datetime:
expireDate = item
if type(whoisRes.expiration_date) is datetime.datetime:
expireDate = whoisRes.expiration_date
if type(whoisRes.expiration_date) is str:
#print "expiration_date is str type"
expireDate = datetime.datetime.strptime(whoisRes.expiration_date,"%Y-%m-%dT%H:%M:%S.0Z")
if expireDate == None:
#use expires since the regular expression for *.com may not be equal on all *.com domains
#print "Is none"
listx = re.findall('expires:\s*(.+)',whoisRes.text,re.IGNORECASE)
if(len(listx)>0): expireDate = datetime.datetime.strptime(listx[0],'%Y-%m-%d %H:%M:%S')
if whoisRes.domain_name == None or len(whoisRes.domain_name) == 0:
#print "Domain name is empty"
listx = re.findall('\ndomain:\s*(.+)\n',whoisRes.text,re.IGNORECASE)
#print listx
#print whoisRes.text
if(len(listx)>0): whoisRes.domain_name = listx[0]
domainStatus.append(Domain(whoisRes.domain_name, whoisRes.status , expireDate, str(whoisRes)))
# send the results to the define email
msg = ''
for status in domainStatus:
msg = "%s%s\n" % (msg , status)
Logger.log(msg)
SendMail(config, msg)
| unknown | codeparrot/codeparrot-clean | | |
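The checker script above defines a Checker class but no __main__ entry point, and it expects its settings in ~/.domainChecker.plist. A minimal bootstrap sketch, assuming the script is importable as domain_checker (the module name is an assumption) and that the generated template has been copied to the home directory and edited with real domains and mail settings:

from domain_checker import Checker, Config  # module name is an assumption

if __name__ == '__main__':
    # Config.createDummyConfig()  # run once to write dummyConfig.plist, then
    #                             # copy it to ~/.domainChecker.plist and edit it
    Checker()  # runs whois for each configured domain and mails the report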
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_alert_policy
short_description: Create or Delete Alert Policies at CenturyLink Cloud.
description:
- An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
version_added: "2.0"
options:
alias:
description:
- The alias of your CLC Account
required: True
name:
description:
- The name of the alert policy. This is mutually exclusive with id
required: False
default: None
id:
description:
- The alert policy id. This is mutually exclusive with name
required: False
default: None
alert_recipients:
description:
- A list of recipient email ids to notify the alert.
This is required for state 'present'
required: False
default: None
metric:
description:
- The metric on which to measure the condition that will trigger the alert.
This is required for state 'present'
required: False
default: None
choices: ['cpu','memory','disk']
duration:
description:
- The length of time in minutes that the condition must exceed the threshold.
This is required for state 'present'
required: False
default: None
threshold:
description:
- The threshold that will trigger the alert when the metric equals or exceeds it.
This is required for state 'present'
This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0
required: False
default: None
state:
description:
- Whether to create or delete the policy.
required: False
default: present
choices: ['present','absent']
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
---
- name: Create Alert Policy Example
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create an Alert Policy for disk above 80% for 5 minutes
clc_alert_policy:
alias: wfad
name: 'alert for disk > 80%'
alert_recipients:
- test1@centurylink.com
- test2@centurylink.com
metric: 'disk'
duration: '00:05:00'
threshold: 80
state: present
register: policy
- name: debug
debug: var=policy
---
- name: Delete Alert Policy Example
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete an Alert Policy
clc_alert_policy:
alias: wfad
name: 'alert for disk > 80%'
state: absent
register: policy
- name: debug
debug: var=policy
'''
RETURN = '''
policy:
description: The alert policy information
returned: success
type: dict
sample:
{
"actions": [
{
"action": "email",
"settings": {
"recipients": [
"user1@domain.com",
"user1@domain.com"
]
}
}
],
"id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
"links": [
{
"href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
"rel": "self",
"verbs": [
"GET",
"DELETE",
"PUT"
]
}
],
"name": "test_alert",
"triggers": [
{
"duration": "00:05:00",
"metric": "disk",
"threshold": 80.0
}
]
}
'''
__version__ = '${version}'
import json
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcAlertPolicy:
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
self.policy_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(default=None),
id=dict(default=None),
alias=dict(required=True, default=None),
alert_recipients=dict(type='list', default=None),
metric=dict(
choices=[
'cpu',
'memory',
'disk'],
default=None),
duration=dict(type='str', default=None),
threshold=dict(type='int', default=None),
state=dict(default='present', choices=['present', 'absent'])
)
mutually_exclusive = [
['name', 'id']
]
return {'argument_spec': argument_spec,
'mutually_exclusive': mutually_exclusive}
# Module Behavior Goodness
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
p = self.module.params
self._set_clc_credentials_from_env()
self.policy_dict = self._get_alert_policies(p['alias'])
if p['state'] == 'present':
changed, policy = self._ensure_alert_policy_is_present()
else:
changed, policy = self._ensure_alert_policy_is_absent()
self.module.exit_json(changed=changed, policy=policy)
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_alert_policy_is_present(self):
"""
Ensures that the alert policy is present
:return: (changed, policy)
changed: A flag representing if anything is modified
policy: the created/updated alert policy
"""
changed = False
p = self.module.params
policy_name = p.get('name')
if not policy_name:
            self.module.fail_json(msg='Policy name is required')
policy = self._alert_policy_exists(policy_name)
if not policy:
changed = True
policy = None
if not self.module.check_mode:
policy = self._create_alert_policy()
else:
changed_u, policy = self._ensure_alert_policy_is_updated(policy)
if changed_u:
changed = True
return changed, policy
def _ensure_alert_policy_is_absent(self):
"""
Ensures that the alert policy is absent
:return: (changed, None)
changed: A flag representing if anything is modified
"""
changed = False
p = self.module.params
alert_policy_id = p.get('id')
alert_policy_name = p.get('name')
alias = p.get('alias')
if not alert_policy_id and not alert_policy_name:
self.module.fail_json(
msg='Either alert policy id or policy name is required')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id(
self.module,
alert_policy_name)
if alert_policy_id and alert_policy_id in self.policy_dict:
changed = True
if not self.module.check_mode:
self._delete_alert_policy(alias, alert_policy_id)
return changed, None
def _ensure_alert_policy_is_updated(self, alert_policy):
"""
Ensures the alert policy is updated if anything is changed in the alert policy configuration
:param alert_policy: the target alert policy
:return: (changed, policy)
changed: A flag representing if anything is modified
policy: the updated the alert policy
"""
changed = False
p = self.module.params
alert_policy_id = alert_policy.get('id')
email_list = p.get('alert_recipients')
metric = p.get('metric')
duration = p.get('duration')
threshold = p.get('threshold')
policy = alert_policy
if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
(duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
(threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
changed = True
elif email_list:
t_email_list = list(
alert_policy.get('actions')[0].get('settings').get('recipients'))
if set(email_list) != set(t_email_list):
changed = True
if changed and not self.module.check_mode:
policy = self._update_alert_policy(alert_policy_id)
return changed, policy
def _get_alert_policies(self, alias):
"""
Get the alert policies for account alias by calling the CLC API.
:param alias: the account alias
:return: the alert policies for the account alias
"""
response = {}
policies = self.clc.v2.API.Call('GET',
'/v2/alertPolicies/%s'
% alias)
for policy in policies.get('items'):
response[policy.get('id')] = policy
return response
def _create_alert_policy(self):
"""
Create an alert Policy using the CLC API.
:return: response dictionary from the CLC API.
"""
p = self.module.params
alias = p['alias']
email_list = p['alert_recipients']
metric = p['metric']
duration = p['duration']
threshold = p['threshold']
policy_name = p['name']
arguments = json.dumps(
{
'name': policy_name,
'actions': [{
'action': 'email',
'settings': {
'recipients': email_list
}
}],
'triggers': [{
'metric': metric,
'duration': duration,
'threshold': threshold
}]
}
)
try:
result = self.clc.v2.API.Call(
'POST',
'/v2/alertPolicies/%s' % alias,
arguments)
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to create alert policy "{0}". {1}'.format(
policy_name, str(e.response_text)))
return result
def _update_alert_policy(self, alert_policy_id):
"""
Update alert policy using the CLC API.
:param alert_policy_id: The clc alert policy id
:return: response dictionary from the CLC API.
"""
p = self.module.params
alias = p['alias']
email_list = p['alert_recipients']
metric = p['metric']
duration = p['duration']
threshold = p['threshold']
policy_name = p['name']
arguments = json.dumps(
{
'name': policy_name,
'actions': [{
'action': 'email',
'settings': {
'recipients': email_list
}
}],
'triggers': [{
'metric': metric,
'duration': duration,
'threshold': threshold
}]
}
)
try:
result = self.clc.v2.API.Call(
'PUT', '/v2/alertPolicies/%s/%s' %
(alias, alert_policy_id), arguments)
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to update alert policy "{0}". {1}'.format(
policy_name, str(e.response_text)))
return result
def _delete_alert_policy(self, alias, policy_id):
"""
Delete an alert policy using the CLC API.
:param alias : the account alias
:param policy_id: the alert policy id
:return: response dictionary from the CLC API.
"""
try:
result = self.clc.v2.API.Call(
'DELETE', '/v2/alertPolicies/%s/%s' %
(alias, policy_id), None)
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to delete alert policy id "{0}". {1}'.format(
policy_id, str(e.response_text)))
return result
def _alert_policy_exists(self, policy_name):
"""
Check to see if an alert policy exists
:param policy_name: name of the alert policy
:return: boolean of if the policy exists
"""
result = False
for policy_id in self.policy_dict:
if self.policy_dict.get(policy_id).get('name') == policy_name:
result = self.policy_dict.get(policy_id)
return result
def _get_alert_policy_id(self, module, alert_policy_name):
"""
retrieves the alert policy id of the account based on the name of the policy
:param module: the AnsibleModule object
:param alert_policy_name: the alert policy name
:return: alert_policy_id: The alert policy id
"""
alert_policy_id = None
for policy_id in self.policy_dict:
if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = policy_id
else:
return module.fail_json(
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
argument_dict = ClcAlertPolicy._define_module_argument_spec()
module = AnsibleModule(supports_check_mode=True, **argument_dict)
clc_alert_policy = ClcAlertPolicy(module)
clc_alert_policy.process_request()
if __name__ == '__main__':
main()
| unknown | codeparrot/codeparrot-clean | | |
import logging
import sys
from fackup.config import config
def setup_logging(verbose, quiet, logger=None):
if not logger:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fmt = logging.Formatter('[%(asctime)s] [%(process)d] ' \
'%(levelname)-5s (%(name)s) %(message)s',
'%Y-%m-%d %H:%M:%S')
logfile = None
if 'logging' in config['general'].keys():
logfile = config['general']['logging'].get('file')
if logfile:
logfile_level = config['general']['logging'].get('level', 'info')
logfile_level = logfile_level.lower()
f = logging.FileHandler(logfile)
if logfile_level == "error":
f.setLevel(logging.ERROR)
elif logfile_level == "debug":
f.setLevel(logging.DEBUG)
else:
f.setLevel(logging.INFO)
f.setFormatter(fmt)
logger.addHandler(f)
if verbose:
stdout = logging.StreamHandler(sys.stdout)
if verbose >= 2:
stdout.setLevel(logging.DEBUG)
else:
stdout.setLevel(logging.INFO)
stdout.setFormatter(fmt)
logger.addHandler(stdout)
if not quiet and not verbose:
stderr = logging.StreamHandler()
stderr.setLevel(logging.ERROR)
stderr.setFormatter(fmt)
logger.addHandler(stderr)
| unknown | codeparrot/codeparrot-clean | | |
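A minimal usage sketch for the helper above; the import path fackup.log and the logger name are assumptions, since this row does not record the file's path. With verbose=1 it adds an INFO stdout handler on top of the optional file handler taken from config['general']['logging']:

import logging

from fackup.log import setup_logging  # import path is an assumption

setup_logging(verbose=1, quiet=False)
logging.getLogger('fackup.demo').info('backup run started')  # logger name is illustrative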
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2003 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class TabbedDoc:
def __init__(self, columns):
self.columns = columns
self.name = ""
def creator(self, name):
self.name = name
def open(self,filename):
pass
def close(self):
pass
def start_page(self):
pass
def end_page(self):
pass
def start_paragraph(self):
pass
def end_paragraph(self):
pass
def start_table(self):
pass
def end_table(self):
pass
def start_row(self):
pass
def end_row(self):
pass
def write_cell(self, text):
pass
| unknown | codeparrot/codeparrot-clean | | |
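TabbedDoc itself only defines no-op hooks; concrete back ends are expected to override them. A hedged sketch of one possible subclass that writes tab-separated rows; TabDelimitedDoc is illustrative and not part of the Gramps code above:

class TabDelimitedDoc(TabbedDoc):
    def open(self, filename):
        self.f = open(filename, 'w')
        self.row = []

    def close(self):
        self.f.close()

    def start_row(self):
        self.row = []

    def end_row(self):
        # Emit one tab-separated line per table row.
        self.f.write('\t'.join(self.row) + '\n')

    def write_cell(self, text):
        self.row.append(text)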
#!/usr/bin/env python
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import logging
import traceback
import argparse
import pyOCD.board.mbed_board
from pyOCD import __version__
from pyOCD.gdbserver import GDBServer
from pyOCD.board import MbedBoard
from pyOCD.utility.cmdline import split_command_line
import pyOCD.board.mbed_board
LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
supported_targets = pyOCD.target.TARGET.keys()
debug_levels = LEVELS.keys()
class GDBServerTool(object):
def __init__(self):
self.args = None
self.gdb_server_settings = None
self.echo_msg = None
def build_parser(self):
        # Keep args in sync with flash_tool.py when possible
parser = argparse.ArgumentParser(description='PyOCD GDB Server')
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument("-p", "--port", dest = "port_number", type=int, default = 3333, help = "Write the port number that GDB server will open.")
parser.add_argument("-T", "--telnet-port", dest="telnet_port", type=int, default=4444, help="Specify the telnet port for semihosting.")
parser.add_argument("-b", "--board", dest = "board_id", default = None, help="Connect to board by board id. Use -l to list all connected boards.")
parser.add_argument("-l", "--list", action = "store_true", dest = "list_all", default = False, help = "List all connected boards.")
parser.add_argument("-d", "--debug", dest = "debug_level", choices = debug_levels, default = 'info', help = "Set the level of system logging output. Supported choices are: "+", ".join(debug_levels), metavar="LEVEL")
parser.add_argument("-t", "--target", dest = "target_override", choices=supported_targets, default = None, help = "Override target to debug. Supported targets are: "+", ".join(supported_targets), metavar="TARGET")
parser.add_argument("-n", "--nobreak", dest = "break_at_hardfault", default = True, action="store_false", help = "Disable halt at hardfault handler." )
parser.add_argument("-r", "--reset-break", dest = "break_on_reset", default = False, action="store_true", help = "Halt the target when reset." )
parser.add_argument("-s", "--step-int", dest = "step_into_interrupt", default = False, action="store_true", help = "Allow single stepping to step into interrupts." )
parser.add_argument("-f", "--frequency", dest = "frequency", default = 1000000, type=int, help = "Set the SWD clock frequency in Hz." )
parser.add_argument("-o", "--persist", dest = "persist", default = False, action="store_true", help = "Keep GDB server running even after remote has detached.")
parser.add_argument("-bh", "--soft-bkpt-as-hard", dest = "soft_bkpt_as_hard", default = False, action = "store_true", help = "Replace software breakpoints with hardware breakpoints.")
group = parser.add_mutually_exclusive_group()
group.add_argument("-ce", "--chip_erase", action="store_true",help="Use chip erase when programming.")
group.add_argument("-se", "--sector_erase", action="store_true",help="Use sector erase when programming.")
# -Currently "--unlock" does nothing since kinetis parts will automatically get unlocked
parser.add_argument("-u", "--unlock", action="store_true", default=False, help="Unlock the device.")
# reserved: "-a", "--address"
# reserved: "-s", "--skip"
parser.add_argument("-hp", "--hide_progress", action="store_true", help = "Don't display programming progress." )
parser.add_argument("-fp", "--fast_program", action="store_true", help = "Use only the CRC of each page to determine if it already has the same data.")
parser.add_argument("-S", "--semihosting", dest="enable_semihosting", action="store_true", help="Enable semihosting.")
parser.add_argument("-G", "--gdb-syscall", dest="semihost_use_syscalls", action="store_true", help="Use GDB syscalls for semihosting file I/O.")
parser.add_argument("-c", "--command", dest="commands", metavar="CMD", action='append', nargs='+', help="Run command (OpenOCD compatibility).")
return parser
def get_chip_erase(self, args):
# Determine programming mode
chip_erase = None
if args.chip_erase:
chip_erase = True
elif args.sector_erase:
chip_erase = False
return chip_erase
def get_gdb_server_settings(self, args):
# Set gdb server settings
return {
'break_at_hardfault' : args.break_at_hardfault,
'step_into_interrupt' : args.step_into_interrupt,
'break_on_reset' : args.break_on_reset,
'persist' : args.persist,
'soft_bkpt_as_hard' : args.soft_bkpt_as_hard,
'chip_erase': self.get_chip_erase(args),
'hide_programming_progress' : args.hide_progress,
'fast_program' : args.fast_program,
'server_listening_callback' : self.server_listening,
'enable_semihosting' : args.enable_semihosting,
'telnet_port' : args.telnet_port,
'semihost_use_syscalls' : args.semihost_use_syscalls,
}
def setup_logging(self, args):
level = LEVELS.get(args.debug_level, logging.NOTSET)
logging.basicConfig(level=level)
## @brief Handle OpenOCD commands for compatibility.
def process_commands(self, commands):
if commands is None:
return
for cmd_list in commands:
try:
cmd_list = split_command_line(cmd_list)
cmd = cmd_list[0]
if cmd == 'gdb_port':
if len(cmd_list) < 2:
print "Missing port argument"
else:
self.args.port_number = int(cmd_list[1], base=0)
elif cmd == 'telnet_port':
if len(cmd_list) < 2:
print "Missing port argument"
else:
self.gdb_server_settings['telnet_port'] = int(cmd_list[1], base=0)
elif cmd == 'echo':
self.echo_msg = ' '.join(cmd_list[1:])
else:
print "Unsupported command: %s" % ' '.join(cmd_list)
except IndexError:
pass
def server_listening(self, server):
print >>sys.stderr, self.echo_msg
sys.stderr.flush()
def run(self):
self.args = self.build_parser().parse_args()
self.gdb_server_settings = self.get_gdb_server_settings(self.args)
self.setup_logging(self.args)
self.process_commands(self.args.commands)
gdb = None
if self.args.list_all == True:
MbedBoard.listConnectedBoards()
else:
try:
board_selected = MbedBoard.chooseBoard(
board_id=self.args.board_id,
target_override=self.args.target_override,
frequency=self.args.frequency)
with board_selected as board:
# Boost speed with deferred transfers
board.transport.setDeferredTransfer(True)
gdb = GDBServer(board, self.args.port_number, self.gdb_server_settings)
while gdb.isAlive():
gdb.join(timeout=0.5)
except KeyboardInterrupt:
if gdb != None:
gdb.stop()
except Exception as e:
print "uncaught exception: %s" % e
traceback.print_exc()
if gdb != None:
gdb.stop()
return 1
# Successful exit.
return 0
def main():
sys.exit(GDBServerTool().run())
if __name__ == '__main__':
main()
| unknown | codeparrot/codeparrot-clean | | |
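A minimal invocation sketch for the tool above, driven through the argument parser it defines; the import path, the board selection, and the k64f target name are assumptions rather than values taken from the file:

import sys

from pyOCD.tools.gdb_server import GDBServerTool  # import path is an assumption

sys.argv = [
    'gdbserver',
    '--port', '3333',     # GDB remote port, default 3333
    '--target', 'k64f',   # must be one of pyOCD.target.TARGET.keys(); placeholder value
    '--debug', 'info',
]
sys.exit(GDBServerTool().run())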