hexsha (stringlengths 40 to 40) | size (int64 3 to 1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3 to 972) | max_stars_repo_name (stringlengths 6 to 130) | max_stars_repo_head_hexsha (stringlengths 40 to 78) | max_stars_repo_licenses (sequencelengths 1 to 10) | max_stars_count (int64 1 to 191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24 to 24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24 to 24 ⌀) | max_issues_repo_path (stringlengths 3 to 972) | max_issues_repo_name (stringlengths 6 to 130) | max_issues_repo_head_hexsha (stringlengths 40 to 78) | max_issues_repo_licenses (sequencelengths 1 to 10) | max_issues_count (int64 1 to 116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24 to 24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24 to 24 ⌀) | max_forks_repo_path (stringlengths 3 to 972) | max_forks_repo_name (stringlengths 6 to 130) | max_forks_repo_head_hexsha (stringlengths 40 to 78) | max_forks_repo_licenses (sequencelengths 1 to 10) | max_forks_count (int64 1 to 105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24 to 24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24 to 24 ⌀) | content (stringlengths 3 to 1.03M) | avg_line_length (float64 1.13 to 941k) | max_line_length (int64 2 to 941k) | alphanum_fraction (float64 0 to 1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f644bdae1c799ac19c0985d13b86858204d041b0 | 38,289 | py | Python | wavefunctions/31/one-shot.py | Jeffrey-Ede/One-Shot | 27696c0886b8d6b5f088ff1a93fadf5c3115b856 | ["MIT"] | 5 | 2020-03-25T10:27:46.000Z | 2021-11-26T07:31:15.000Z | wavefunctions/31/one-shot.py | Jeffrey-Ede/One-Shot | 27696c0886b8d6b5f088ff1a93fadf5c3115b856 | ["MIT"] | null | null | null | wavefunctions/31/one-shot.py | Jeffrey-Ede/One-Shot | 27696c0886b8d6b5f088ff1a93fadf5c3115b856 | ["MIT"] | 2 | 2020-01-31T00:21:28.000Z | 2021-11-30T09:08:38.000Z |
# -*- coding: utf-8 -*-
"""
Deep learning supersampling network for scanning transmission electron microscopy.
This is a standard convolutional network i.e. with batch norm and L2 regularization.
Acknowledgement: Initial testing of this network was performed with CIFAR-10
in Google Colab
"""
import tensorflow as tf
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tensorflow.contrib.layers.python.layers import initializers
import itertools
import time
from PIL import Image
import queue
EXPER_NUM = 31
cropsize = 144#192#224#256
use_batch_norm = True
batch_norm_decay = 0.999
use_vbn = False
use_instance_norm = False#True
adversarial = True
use_spectral_norm = True#True
use_gradient_penalty = False#True
standard_wass = False
use_l2_loss = False
## Load data
def flip_rotate(img):
"""Applies a random flip || rotation to the image, possibly leaving it unchanged"""
choice = np.random.randint(0, 8)
if choice == 0:
return img
if choice == 1:
return np.rot90(img, 1)
if choice == 2:
return np.rot90(img, 2)
if choice == 3:
return np.rot90(img, 3)
if choice == 4:
return np.flip(img, 0)
if choice == 5:
return np.flip(img, 1)
if choice == 6:
return np.flip(np.rot90(img, 1), 0)
if choice == 7:
return np.flip(np.rot90(img, 1), 1)
def load_image(addr):
"""Read an image and make sure it is of the correct type. Optionally resize it"""
if type(addr) == bytes:
addr = addr.decode()
img = np.load(addr)
off_x = np.random.randint(0, 320-cropsize)
off_y = np.random.randint(0, 320-cropsize)
img = img[off_x:off_x+cropsize, off_y:off_y+cropsize]
img = flip_rotate(img)
return img
def scale0to1(img):
"""Rescale image between 0 and 1"""
img = img.astype(np.float32)
min = np.min(img)
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.5)
else:
img = (img-min) / (max-min)
return img.astype(np.float32)
def norm_img(img, min=None, max=None, get_min_and_max=False):
if min == None:
min = np.min(img)
if max == None:
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.)
else:
a = 0.5*(min+max)
b = 0.5*(max-min)
img = (img-a) / b
if get_min_and_max:
return img.astype(np.float32), (min, max)
else:
return img.astype(np.float32)
def preprocess(img):
img[np.isnan(img)] = 0.
img[np.isinf(img)] = 0.
return img
history = queue.Queue()
def record_parser(record):
"""Parse files and generate lower quality images from them."""
if np.random.randint(0,2) and history.qsize() > 100:
try:
(lq, img) = history.get()
return lq, img
except:
pass
img = flip_rotate(preprocess(load_image(record)))
lq = np.abs(img).astype(np.float32)
#img = np.angle(img).astype(np.float32)
#img = np.where(
# img < 0,
# 2*img/np.pi + 1,
# 1 - 2*img/np.pi
# )
#img = (img.real/lq).astype(np.float32)
angle = np.angle(img)
img = np.stack((np.cos(angle), np.sin(angle)), axis=-1).astype(np.float32)
if np.sum(np.isfinite(img)) != np.product(img.shape) or np.sum(np.isfinite(lq)) != np.product(lq.shape):
img = np.zeros((cropsize,cropsize,2))
lq = np.zeros((cropsize,cropsize))
try:
history.put( (lq, img) )
except:
pass
return lq, img
def shaper(lq, img):
lq = tf.reshape(lq, [cropsize, cropsize, 1])
img = tf.reshape(img, [cropsize, cropsize, 2])
return lq, img
def load_data(dir, subset, batch_size):
"""Create a dataset from a list of filenames and shard batches from it"""
with tf.device('/cpu:0'):
dataset = tf.data.Dataset.list_files(dir+subset+"/"+"*.npy")
dataset = dataset.shuffle(buffer_size=5000)
dataset = dataset.repeat()
dataset = dataset.map(
lambda file: tf.py_func(record_parser, [file], [tf.float32, tf.float32])
)
dataset = dataset.map(shaper)
dataset = dataset.batch(batch_size=batch_size)
dataset = dataset.prefetch(buffer_size=10)
iters = dataset.make_one_shot_iterator().get_next()
#Add batch dimension size to graph
for iter in iters:
iter.set_shape([batch_size]+iter.get_shape().as_list()[1:])
return iters
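#Illustrative usage sketch (not part of the original script): consuming the one-shot
#iterator returned by load_data in a TensorFlow 1.x session. The data directory and
#subset name below are placeholders.
def _demo_load_data(data_dir="//some/server/wavefunctions/", subset="train"):
    lq_batch, img_batch = load_data(dir=data_dir, subset=subset, batch_size=4)
    with tf.Session() as sess:
        lq, img = sess.run([lq_batch, img_batch])
        #lq: [4, cropsize, cropsize, 1] amplitudes; img: [4, cropsize, cropsize, 2] phase components
        print(lq.shape, img.shape)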
# Utility
def flip_and_rotate(x):
"""Random combination of flips and rotations."""
for augmentator in [flip, rotate]:
x = augmentator(x)
return x
def rotate(x: tf.Tensor) -> tf.Tensor:
"""Rotation augmentation
Args:
x: Image
Returns:
Augmented image
"""
# Rotate 0, 90, 180, 270 degrees
return tf.image.rot90(x, tf.random_uniform(shape=[], minval=0, maxval=4, dtype=tf.int32))
def flip(x: tf.Tensor) -> tf.Tensor:
"""Flip augmentation
Args:
x: Image to flip
Returns:
Augmented image
"""
x = tf.image.random_flip_left_right(x)
x = tf.image.random_flip_up_down(x)
return x
def auto_name(name):
"""Append number to variable name to make it unique.
Inputs:
name: Start of variable name.
Returns:
Full variable name with number afterwards to make it unique.
"""
scope = tf.contrib.framework.get_name_scope()
vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
names = [v.name for v in vars]
#Increment variable number until unused name is found
for i in itertools.count():
short_name = name + "_" + str(i)
sep = "/" if scope != "" else ""
full_name = scope + sep + short_name
if not full_name in [n[:len(full_name)] for n in names]:
return short_name
def alrc(
loss,
num_stddev=3,
decay=0.997,
mu1_start=5,
mu2_start=7**2,
in_place_updates=True
):
"""Adaptive learning rate clipping (ALRC) of outlier losses.
Inputs:
loss: Loss function to limit outlier losses of.
num_stddev: Number of standard deviation above loss mean to limit it
to.
decay: Decay rate for exponential moving averages used to track the first
two raw moments of the loss.
mu1_start: Initial estimate for the first raw moment of the loss.
mu2_start: Initial estimate for the second raw moment of the loss.
in_place_updates: If False, add control dependencies for moment tracking
to tf.GraphKeys.UPDATE_OPS. This allows the control dependencies to be
executed in parallel with other dependencies later.
Return:
Loss function with control dependencies for ALRC.
"""
    #Variables to track first two raw moments of the loss
mu = tf.get_variable(
auto_name("mu1"),
initializer=tf.constant(mu1_start, dtype=tf.float32))
mu2 = tf.get_variable(
auto_name("mu2"),
initializer=tf.constant(mu2_start, dtype=tf.float32))
#Use capped loss for moment updates to limit the effect of outlier losses on the threshold
sigma = tf.sqrt(mu2 - mu**2+1.e-8)
loss = tf.where(loss < mu+num_stddev*sigma,
loss,
loss/tf.stop_gradient(loss/(mu+num_stddev*sigma)))
#Update moment moving averages
mean_loss = tf.reduce_mean(loss)
mean_loss2 = tf.reduce_mean(loss**2)
update_ops = [mu.assign(decay*mu+(1-decay)*mean_loss),
mu2.assign(decay*mu2+(1-decay)*mean_loss2)]
if in_place_updates:
with tf.control_dependencies(update_ops):
loss = tf.identity(loss)
else:
#Control dependencies that can be executed in parallel with other update
#ops. Often, these dependencies are added to train ops e.g. alongside
#batch normalization update ops.
for update_op in update_ops:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op)
return loss
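#Illustrative usage sketch (not part of the original script): ALRC is applied to a
#per-example loss before it is reduced to a scalar, so outlier examples are clipped
#to roughly num_stddev standard deviations above the running mean loss.
def _demo_alrc():
    per_example_loss = tf.random_uniform([16]) #stand-in for e.g. per-image squared errors
    clipped = alrc(per_example_loss, num_stddev=3)
    return tf.reduce_mean(clipped)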
def spectral_norm(w, iteration=1, in_place_updates=True):
"""Spectral normalization. It imposes Lipschitz continuity by constraining the
spectral norm (maximum singular value) of weight matrices.
Inputs:
w: Weight matrix to spectrally normalize.
iteration: Number of times to apply the power iteration method to
enforce spectral norm.
Returns:
Weight matrix with spectral normalization control dependencies.
"""
if not use_spectral_norm:
return w
w0 = w
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable(auto_name("u"),
[1, w_shape[-1]],
initializer=tf.random_normal_initializer(mean=0.,stddev=0.03),
trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
"""
power iteration
Usually iteration = 1 will be enough
"""
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
if in_place_updates:
        #In-place control dependencies bottleneck training
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
else:
#Execute control dependency in parallel with other update ops
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u.assign(u_hat))
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
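#Illustrative usage sketch (not part of the original script): spectrally normalizing a
#convolution kernel before it is used, as spectral_norm_conv does below. The kernel
#shape here is arbitrary.
def _demo_spectral_norm():
    w = tf.get_variable(auto_name("demo_kernel"), shape=[3, 3, 64, 128])
    return spectral_norm(w) #same shape as w, rescaled by its largest singular value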
def spectral_norm_dense(
inputs,
num_outputs,
biases_initializer=tf.zeros_initializer()
):
w = tf.get_variable(auto_name("weights"), shape=[inputs.get_shape()[-1], num_outputs])
x = tf.matmul(inputs, spectral_norm(w))
if biases_initializer != None:
b = tf.get_variable(auto_name("bias"), [num_outputs], initializer=biases_initializer)
x = tf.nn.bias_add(x, b)
return x
def spectral_norm_conv(inputs,
num_outputs,
stride=1,
kernel_size=3,
padding='VALID',
biases_initializer=tf.zeros_initializer()):
"""Convolutional layer with spectrally normalized weights."""
w = tf.get_variable(auto_name("kernel"), shape=[kernel_size, kernel_size, inputs.get_shape()[-1], num_outputs])
x = tf.nn.conv2d(input=inputs, filter=spectral_norm(w),
strides=[1, stride, stride, 1], padding=padding)
if biases_initializer != None:
b = tf.get_variable(auto_name("bias"), [num_outputs], initializer=biases_initializer)
x = tf.nn.bias_add(x, b)
return x
std_actv = lambda x: tf.nn.leaky_relu(x, alpha=0.1)
def conv(
inputs,
num_outputs,
kernel_size=3,
stride=1,
padding='SAME',
data_format="NHWC",
actv_fn=std_actv,
is_batch_norm=True,
is_spectral_norm=False,
is_depthwise_sep=False,
extra_batch_norm=False,
biases_initializer=tf.zeros_initializer,
weights_initializer=initializers.xavier_initializer,
transpose=False,
is_training=True
):
"""Convenience function for a strided convolutional or transpositional
convolutional layer.
Intro: https://towardsdatascience.com/intuitively-understanding-convolutions-for-deep-learning-1f6f42faee1.
The order is: Activation (Optional) -> Batch Normalization (optional) -> Convolutions.
Inputs:
inputs: Tensor of shape `[batch_size, height, width, channels]` to apply
convolutions to.
num_outputs: Number of feature channels to output.
        kernel_size: Side length of square convolutional kernels.
        stride: Distance between convolutional kernel applications.
        padding: 'SAME' for zero padding where kernels go over the edge.
        'VALID' to discard features where kernels go over the edge.
        actv_fn: Non-linearity to apply after summing convolutions.
is_batch_norm: If True, add batch normalization after activation.
is_spectral_norm: If True, spectrally normalize weights.
is_depthwise_sep: If True, depthwise separate convolutions into depthwise
spatial convolutions, then 1x1 pointwise convolutions.
extra_batch_norm: If True and convolutions are depthwise separable, implement
batch normalization between depthwise and pointwise convolutions.
biases_initializer: Function to initialize biases with. None for no biases.
weights_initializer: Function to initialize weights with. None for no weights.
transpose: If True, apply convolutional layer transpositionally to the
described convolutional layer.
is_training: If True, use training specific operations e.g. batch normalization
update ops.
Returns:
Output of convolutional layer.
"""
x = inputs
num_spatial_dims = len(x.get_shape().as_list()) - 2
if biases_initializer == None:
biases_initializer = lambda: None
if weights_initializer == None:
weights_initializer = lambda: None
if not is_spectral_norm:
#Convolutional layer without spectral normalization
if transpose:
stride0 = 1
if type(stride) == list or is_depthwise_sep or stride % 1:
#Apparently there is no implementation of transpositional
#depthwise separable convolutions, so bilinearly upsample then
#depthwise separably convolute
if kernel_size != 1:
x = tf.image.resize_bilinear(
images=x,
size=stride if type(stride) == list else \
[int(stride*d) for d in x.get_shape().as_list()[1:3]],
align_corners=True
)
stride0 = stride
stride = 1
if type(stride0) == list and not is_depthwise_sep:
layer = tf.contrib.layers.conv2d
elif is_depthwise_sep:
layer = tf.contrib.layers.separable_conv2d
else:
layer = tf.contrib.layers.conv2d_transpose
x = layer(
inputs=x,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=None,
weights_initializer=weights_initializer(),
biases_initializer=biases_initializer())
if type(stride0) != list:
if (is_depthwise_sep or stride0 % 1) and kernel_size == 1:
x = tf.image.resize_bilinear(
images=x,
size=[int(stride0*d) for d in x.get_shape().as_list()[1:3]],
align_corners=True
)
else:
if num_spatial_dims == 1:
layer = tf.contrib.layers.conv1d
elif num_spatial_dims == 2:
if is_depthwise_sep:
layer = tf.contrib.layers.separable_conv2d
else:
layer = tf.contrib.layers.conv2d
x = layer(
inputs=x,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=None,
weights_initializer=weights_initializer(),
biases_initializer=biases_initializer())
else:
#Weights are spectrally normalized
x = spectral_norm_conv(
inputs=x,
num_outputs=num_outputs,
stride=stride,
kernel_size=kernel_size,
padding=padding,
biases_initializer=biases_initializer())
if actv_fn:
x = actv_fn(x)
if is_batch_norm and use_batch_norm:
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
return x
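#Illustrative usage sketch (not part of the original script): a strided downsampling
#convolution followed by a transpositional upsampling one, built with the convenience
#function above.
def _demo_conv(images, is_training=True): #images: [batch, height, width, channels]
    x = conv(images, num_outputs=64, stride=2, is_training=is_training) #halve spatial size
    x = conv(x, num_outputs=32, stride=2, transpose=True, is_training=is_training) #double it again
    return x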
def residual_block(inputs, skip=3, is_training=True):
"""Residual block whre the input is added to the signal after skipping some
layers. This architecture is good for learning purturbative transformations.
If no layer is provided, it defaults to a convolutional layer.
Deep residual learning: https://arxiv.org/abs/1512.03385.
Inputs:
inputs: Tensor to apply residual block to. Outputs of every layer will
have the same shape.
skip: Number of layers to skip before adding input to layer output.
layer: Layer to apply in residual block. Defaults to convolutional
layer. Custom layers must support `inputs`, `num_outputs` and `is_training`
arguments.
Returns:
Final output of residual block.
"""
x = x0 = inputs
def layer(inputs, num_outputs, is_training, is_batch_norm, actv_fn):
x = conv(
inputs=inputs,
num_outputs=num_outputs,
is_training=is_training,
actv_fn=actv_fn
)
return x
for i in range(skip):
x = layer(
inputs=x,
num_outputs=x.get_shape()[-1],
is_training=is_training,
is_batch_norm=i < skip - 1,
actv_fn=tf.nn.relu
)
x += x0
if use_batch_norm:
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
return x
def transpose_Xception(
inputs,
num_outputs,
stride=2,
actv_fn=tf.nn.relu,
is_batch_norm=True,
is_training=True
):
"""Transpositional Xception block for upsampling; rather than downsampling."""
x = inputs
if actv_fn:
x = actv_fn(x)
if is_batch_norm:
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
x0 = conv(
inputs=x,
num_outputs=num_outputs,
kernel_size=1,
stride=stride,
is_batch_norm=False,
is_depthwise_sep=True,
transpose=True
)
x = conv(
inputs=x,
num_outputs=num_outputs,
kernel_size=3,
stride=stride,
is_batch_norm=False,
is_depthwise_sep=True,
transpose=True
)
x = conv(
inputs=x,
num_outputs=num_outputs,
is_depthwise_sep=True,
)
x = conv(
inputs=x,
num_outputs=num_outputs,
is_depthwise_sep=True,
)
print(x0, x)
x += x0
return x
def generator(inputs, num_outputs, is_training, is_depthwise_sep=False):
"""Convolutional neural network (CNN) for image supersampling.
Args:
        Inputs: Images tensor with shape [batch_size, height, width, channels].
num_outputs: Number of channels in network output.
is_training: Bool indicating whether to use training operations
Returns:
Super-sampled images
"""
base_size = 32
x = inputs
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
x = conv(
x,
num_outputs=32,
is_training=is_training
)
#Encoder
for i in range(1, 4):
x = conv(
x,
num_outputs=base_size*2**i,
stride=2,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training,
actv_fn=std_actv
)
if i == 2:
low_level = x
#Residual blocks
for _ in range(6): #Number of blocks
x = residual_block(
x,
skip=3,
is_training=is_training
)
#Decoder
for i in range(2, -1, -1):
x = conv(
x,
num_outputs=base_size*2**i,
stride=2,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training,
transpose=True,
actv_fn=std_actv
)
#if x.get_shape().as_list() == low_level.get_shape().as_list(): #Easy way to find concat level!
# x = tf.concat([x, low_level], axis=-1)
# for _ in range(3):
# x = conv(
# x,
# num_outputs=base_size*2**i,
# is_depthwise_sep=is_depthwise_sep,
# is_training=is_training,
# )
x = conv(
x,
num_outputs=32,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training,
)
#Project features onto output image
x = conv(
x,
num_outputs=num_outputs,
biases_initializer=None,
actv_fn=None,
is_batch_norm=True,
is_training=is_training
)
x /= tf.sqrt(1.e-8 + tf.reduce_sum(x**2, axis=-1, keepdims=True))
x0 = x
x *= inputs
return x, x0
def res_block(x, num_outputs, s=1):
x0 = x
start_channels = x.get_shape().as_list()[-1]
if num_outputs != start_channels:
x0 = conv(
inputs=x0,
num_outputs=num_outputs,
kernel_size=1,
stride=1,
actv_fn=None,
is_batch_norm=False,
is_spectral_norm=True
)
x = conv(
inputs=x,
num_outputs=start_channels,
kernel_size=3,
stride=1,
actv_fn=tf.nn.relu,
is_batch_norm=False,
is_spectral_norm=True
)
x = conv(
inputs=x,
num_outputs=num_outputs,
kernel_size=3,
stride=1,
actv_fn=tf.nn.relu,
is_batch_norm=False,
is_spectral_norm=True
)
x += x0
if s > 1:
#x0 = tf.layers.average_pooling2d(x0, s, s)
x = tf.layers.average_pooling2d(x, s, s)
return x
def large_discriminator(inputs):
#Based on https://arxiv.org/pdf/1802.05637.pdf
x = inputs
for i in range(4):
#x = res_block(x, 64*2**i, s=2)
x = conv(
inputs=x,
num_outputs=50*2**i,
kernel_size=4,
stride=1,
actv_fn=tf.nn.leaky_relu,
is_batch_norm=False,
is_spectral_norm=True,
biases_initializer=None
)
x = conv(
inputs=x,
num_outputs=50*2**i,
kernel_size=4,
stride=2,
actv_fn=tf.nn.leaky_relu,
is_batch_norm=False,
is_spectral_norm=True,
biases_initializer=None
)
#for _ in range(4):
# x = res_block(x, 512)
#for _ in range(3):
# x = conv(
# inputs=x,
# num_outputs=400,
# kernel_size=4,
# stride=1,
# actv_fn=tf.nn.leaky_relu,
# is_batch_norm=False,
# is_spectral_norm=True
# )
##x = res_block(x, 1024, s=2)
#x = conv(
# inputs=x,
# num_outputs=800,
# kernel_size=4,
# stride=2,
# actv_fn=tf.nn.leaky_relu,
# is_batch_norm=False,
# is_spectral_norm=True
#)
x = conv(
inputs=x,
num_outputs=200,
kernel_size=4,
stride=2,
actv_fn=tf.nn.leaky_relu,
is_batch_norm=False,
is_spectral_norm=True,
biases_initializer=None
)
#x = tf.layers.flatten(x)
#x = tf.expand_dims(tf.reduce_sum(x, axis=[1,2,3]), axis=-1)
#x = tf.reduce_sum(x, axis=[1,2])
#x = tf.contrib.layers.fully_connected(x, 1)
#x = tf.contrib.layers.fully_connected(x, 1, activation_fn=None, biases_initializer=None)
#x = spectral_norm_dense(x, 2048, biases_initializer=None)
#x = spectral_norm_dense(x, 1024, biases_initializer=None)
x = tf.expand_dims(tf.reduce_mean(x, axis=[1,2,3]), axis=-1)
#x = spectral_norm_dense(x, 1, biases_initializer=None)
#x += 0.5
#x = 0.5 - 0.1 + 1.1*tf.sigmoid(x)
#x = 1 + tf.nn.elu(x)
return x
def configure(
inputs,
batch_size,
target_outputs,
is_training,
learning_rate,
beta1,
is_depthwise_sep,
decay,
gen_scale
):
"""Operations to calculate network losses and run training operations."""
target_outputs0 = target_outputs
with tf.variable_scope("gen"):
output0, phase_components = generator(
inputs=inputs,
num_outputs=target_outputs.get_shape().as_list()[-1],
is_training=is_training,
is_depthwise_sep=is_depthwise_sep
)
output = output0
if adversarial:
#Theoretical argument for EMA tracking is in https://openreview.net/pdf?id=SJgw_sRqFQ
#with tf.variable_scope("tracking/gen"):
# tracking_output = generator(
# inputs=inputs,
# num_outputs=target_outputs.get_shape().as_list()[-1],
# is_training=is_training,
# is_depthwise_sep=is_depthwise_sep
# )
def amp(x):
return 1 + tf.sqrt(1.e-8 + tf.reduce_sum(x**2, axis=-1, keepdims=True))
output = inputs*phase_components#tf.concat([inputs, phase_components], axis=-1)
target_outputs = inputs*target_outputs#tf.concat([inputs, target_outputs], axis=-1)
if use_gradient_penalty:
x_hat = output + tf.random_uniform(output.get_shape().as_list())*(target_outputs-output)
discr_batch = tf.concat([output, target_outputs, x_hat], axis=0)
else:
discr_batch = tf.concat([output, target_outputs], axis=0)
with tf.variable_scope("main/discr"):
preds = large_discriminator(discr_batch)
#with tf.variable_scope("tracking/discr"):
# track_pred = large_discriminator(output)
fake_pred = preds[:batch_size]
real_pred = preds[batch_size:2*batch_size]
if use_gradient_penalty:
x_hat_pred = preds[2*batch_size:3*batch_size]
if use_gradient_penalty:
grad = tf.gradients(x_hat_pred, [x_hat])[0]
grad_norm2 = tf.sqrt(1.e-6 + tf.reduce_sum(tf.square(grad), axis=[1,2,3]))
gradient_penalty = tf.reduce_mean( (grad_norm2 - 1.)**2 )
if use_gradient_penalty or standard_wass:
discr_loss = tf.reduce_mean(fake_pred - real_pred)
gen_loss = -tf.reduce_mean(fake_pred)
else:
#noise = tf.random_uniform(real_pred.get_shape().as_list(), maxval=0.05)
discr_loss = tf.reduce_mean( (real_pred - 1)**2 + (fake_pred)**2 )
gen_loss = tf.reduce_mean( (fake_pred - 1)**2 )
if standard_wass:
for v in tf.trainable_variables("main/discr"):
tf.add_to_collection("clip_weights", v.assign(tf.clip_by_value(v, -0.01, 0.01)))
#mu = tf.get_variable(
# auto_name("avg_loss"),
# initializer=tf.constant(0.707, dtype=tf.float32),
# trainable=False
# )
#mu_op = mu.assign(0.999*mu + 0.001*tf.sqrt(discr_loss))
#tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mu_op)
#mu_scaled = mu/0.707
#discr_lr_scale = tf.cond(mu_scaled > 0.6, lambda: 1., lambda: (mu_scaled/0.6)**2 )
if use_gradient_penalty:
discr_loss += 10*gradient_penalty
#discr_loss /= 100
#gen_loss /= 100
if use_l2_loss:
gen_l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables("gen")])
discr_l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables("main/discr")])
discr_loss += 5.e-5*discr_l2_loss
gen_loss += 5.e-5*gen_l2_loss
#discr_loss = tf.reduce_mean( tf.nn.relu(1-real_pred) + tf.nn.relu(1+fake_pred), axis=-1 ) + 10*gradient_penalty #+ 1.e-5*discr_l2_loss
#gen_loss = -tf.reduce_mean( fake_pred, axis=-1 )# + 5.e-5*gen_l2_loss
#discr_loss = tf.reduce_mean(fake_pred - real_pred) / 1 + 10*gradient_penalty + 1.e-5*discr_l2_loss
#gen_loss = -tf.reduce_mean(fake_pred) / 1 + 1.e-5*gen_l2_loss
#Create optimizer for stochastic gradient descent (SGD)
discr_optimizer = tf.train.AdamOptimizer(
learning_rate=0.0002,
beta1=0.5
)
#discr_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.00005, decay=0.5)
#l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
#total_loss = gen_loss + discr_loss + 10*gradient_penalty + 5.e-5*l2_loss
##Tracking
#for v, t in zip(tf.trainable_variables("main"), tf.trainable_variables("tracking")):
# tf.add_to_collection( tf.GraphKeys.UPDATE_OPS, t.assign(decay*t+(1-decay)*v) )
else:
#Mean squared errors
mse = 10*tf.reduce_mean( tf.square(output - target_outputs), axis=[1,2,3] )
alrc_mse = mse#alrc(mse)
alrc_mse = tf.reduce_mean(alrc_mse)
mse = tf.reduce_mean(mse)
##L2 regularization
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
gen_loss = alrc_mse + 5.e-5*l2_loss
#Create optimizer for stochastic gradient descent (SGD)
gen_optimizer = tf.train.AdamOptimizer(
learning_rate=0.0002,
beta1=0.5
)
#gen_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.0001, decay=0.5)
#(
# learning_rate=learning_rate,
# beta1=beta1,
# beta2=0.9
# )
#Update ops for batch normalisation
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
if adversarial:
#train_op = gen_optimizer.minimize(total_loss)
gen_train_op = gen_optimizer.minimize(gen_loss, var_list=tf.trainable_variables("gen"))
discr_train_op = discr_optimizer.minimize(discr_loss, var_list=tf.trainable_variables("main/discr"))
train_op = [gen_train_op, discr_train_op]
else:
train_op = gen_optimizer.minimize(gen_loss)
output_loss = {
"Loss": tf.reduce_mean( tf.abs(phase_components - target_outputs0) ),
"pred_real": tf.reduce_mean(real_pred),
"pred_fake": tf.reduce_mean(fake_pred)
}
return train_op, output_loss, output0
def experiment(report_every_n=100):
"""Run training operations, then validate.
Args:
report_every_n: Print loss every n training operations. 0 for no printing.
Returns:
Validation top-1 accuracy and a numpy array of training losses
"""
#Placeholders to feed hyperparameters into graph
learning_rate_ph = tf.placeholder(tf.float32, name="learning_rate")
beta1_ph = tf.placeholder(
tf.float32,
shape=(),
name="beta1")
decay_ph = tf.placeholder(
tf.float32,
shape=(),
name="decay")
gen_scale_ph = tf.placeholder(
tf.float32,
shape=(),
name="gen_scale")
is_training_ph = tf.placeholder(
tf.bool,
name="is_training")
mode_ph = tf.placeholder(
tf.int32,
name="mode")
#data_dir = "//Desktop-sa1evjv/h/wavefunctions/"
data_dir = "//Desktop-sa1evjv/f/wavefunctions_single/wavefunctions/"
batch_size = 24
def load_data_subset(subset):
return load_data(
dir=data_dir,
subset=subset,
batch_size=batch_size
)
inputs, target_outputs = tf.case(
{tf.equal(mode_ph, 0): lambda: load_data_subset("train"),
tf.equal(mode_ph, 1): lambda: load_data_subset("val"),
tf.equal(mode_ph, 2): lambda: load_data_subset("test")}
)
#Describe learning policy
start_iter = 307_097#186_911#0#8_094#0
train_iters = 500_000
val_iters = 1_000
learning_rate = 0.0002
beta1 = 0.9
#Configure operations
train_op, loss, output = configure(
inputs=inputs,
batch_size=batch_size,
target_outputs=target_outputs,
is_training=is_training_ph,
learning_rate=learning_rate_ph,
beta1=beta1_ph,
is_depthwise_sep=False,
decay=decay_ph,
gen_scale=gen_scale_ph
)
clip_op = tf.get_collection("clip_weights")
#Tensors to dump as visual output
first_image = inputs[0]
first_target_output = target_outputs[0]
first_output = output[0]
#Session configuration
config = tf.ConfigProto()
config.gpu_options.allow_growth = True #Only use required GPU memory
config.gpu_options.force_gpu_compatible = True
model_dir = f"//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/models/wavefunctions/{EXPER_NUM}/"
saver = tf.train.Saver(max_to_keep=1)
noteable_saver = tf.train.Saver(max_to_keep=1)
log_filepath = model_dir + "log.txt"
save_period = 1; save_period *= 3600
with tf.Session(config=config) as sess, open(log_filepath, "a") as log_file:
#Initialize network parameters
feed_dict = {
is_training_ph: np.bool(True),
learning_rate_ph: np.float32(learning_rate),
beta1_ph: np.float32(beta1),
mode_ph: np.int32(0),
decay_ph: np.float32(0.),
gen_scale_ph: np.float32(0.)
}
if start_iter:
saver.restore(
sess,
tf.train.latest_checkpoint(model_dir+"model/")
)
else:
sess.run(tf.global_variables_initializer(), feed_dict=feed_dict)
#Finalize graph to prevent additional nodes from being added
#sess.graph.finalize()
#Training
avg_pred_fake = 0.4
beta_pred_fake = 0.9
time0 = time.time()
for iter in range(start_iter, train_iters):
is_halfway = iter >= train_iters // 2
decay = 0.997 if iter else 0.
lr = learning_rate #* 0.5**( max( iter//(train_iters//4), 3) )
is_training = True#iter < 1_000 #not is_halfway
beta1 = 0.9 if iter < 200_000 else 0.5
gen_scale = 1.#0 if iter < 50 else 1.
#Feed values into training operations
feed_dict = {
is_training_ph: np.bool(is_training),
learning_rate_ph: np.float32(lr),
beta1_ph: np.float32(beta1),
mode_ph: np.int32(0),
decay_ph: np.float32(decay),
gen_scale_ph: np.float32(gen_scale)
}
if iter in [0, 100, 500] or not iter % 25_000 or (0 <= iter < 10_000 and not iter % 1000) or iter == start_iter:
_, step_loss, [step_image, step_target_output, step_output] = sess.run([
train_op,
loss,
[first_image, first_target_output, first_output]
],
feed_dict=feed_dict
)
save_input_loc = model_dir+"input-"+str(iter)+".tif"
save_truth_loc = model_dir+"truth-"+str(iter)+".tif"
save_output_loc = model_dir+"output-"+str(iter)+".tif"
target_angle = np.angle(step_target_output[...,0] + 1j*step_target_output[...,1])
output_angle = np.angle(step_output[...,0] + 1j*step_output[...,1])
Image.fromarray(step_image.reshape(cropsize, cropsize).astype(np.float32)).save( save_input_loc )
Image.fromarray(np.cos(target_angle).astype(np.float32)).save( save_truth_loc )
Image.fromarray(np.cos(output_angle).astype(np.float32)).save( save_output_loc )
else:
if True:#avg_pred_fake > 0.3 or use_gradient_penalty or standard_wass:
step_train_op = train_op
else:
step_train_op = [train_op[0]]
_, step_loss = sess.run([step_train_op, loss], feed_dict=feed_dict)
if standard_wass:
sess.run(clip_op)
avg_pred_fake = beta_pred_fake*avg_pred_fake + (1-beta_pred_fake)*step_loss["pred_fake"]
output = f"Iter: {iter}"
for k in step_loss:
output += f", {k}: {step_loss[k]}"
if report_every_n:
if not iter % report_every_n:
print(output)
if "nan" in output:
saver.restore(
sess,
tf.train.latest_checkpoint(model_dir+"model/")
)
#quit()
log_file.write(output)
if iter in [train_iters//2-1, train_iters-1]:
noteable_saver.save(sess, save_path=model_dir+"noteable_ckpt/model", global_step=iter)
time0 = time.time()
start_iter = iter
elif time.time() >= time0 + save_period:
saver.save(sess, save_path=model_dir+"model/model", global_step=iter)
time0 = time.time()
#Validation - super important!
val_loss = 0.
for iter in range(val_iters):
feed_dict = {
is_training_ph: np.bool(False),
mode_ph: np.int32(1),
decay_ph: np.float32(decay)
}
step_loss = sess.run(loss, feed_dict=feed_dict)
val_loss += step_loss
val_loss /= val_iters
return val_loss
if __name__ == "__main__":
    #Reset graph so nodes do not accumulate in ipynb session memory.
tf.reset_default_graph()
#Run your experiment!
val_loss = experiment(report_every_n=1)
#Report performance on validation set
print(f"Validation loss: {val_loss}")
    model_dir = f"//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/models/wavefunctions/{EXPER_NUM}/" #same directory as used in experiment()
    with open(model_dir+"val_loss.txt", "w") as f:
        f.write(f"Val Loss: {val_loss}")
| 30.077769 | 143 | 0.590509 |
b9d4cee6f3492c63efd4843c6370eb6c5f8de572 | 1,184 | py | Python | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/UI/__init__.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | ["BSD-3-Clause"] | 1 | 2017-03-28T06:41:51.000Z | 2017-03-28T06:41:51.000Z | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/UI/__init__.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | ["BSD-3-Clause"] | null | null | null | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/UI/__init__.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | ["BSD-3-Clause"] | 1 | 2016-12-13T21:08:58.000Z | 2016-12-13T21:08:58.000Z |
#!/usr/bin/env python
#
# Needed to allow import
#
# Copyright (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
==========================
GUI Integration Components
==========================
Components providing integration with basic GUI libraries, such as pygame and
Tk.
""" | 37 | 83 | 0.646959 |
086f23e72fe724082a2f7061479404f219dbfb58 | 4,789 | py | Python | server/galaxyls/services/xml/document.py | gallardoalba/galaxy-language-server | 6d9642c639002012462d6bbd3d818a2f15e59478 | ["Apache-2.0"] | null | null | null | server/galaxyls/services/xml/document.py | gallardoalba/galaxy-language-server | 6d9642c639002012462d6bbd3d818a2f15e59478 | ["Apache-2.0"] | null | null | null | server/galaxyls/services/xml/document.py | gallardoalba/galaxy-language-server | 6d9642c639002012462d6bbd3d818a2f15e59478 | ["Apache-2.0"] | null | null | null |
from typing import Dict, Optional
from anytree.search import findall
from lxml import etree
from pygls.types import Position, Range
from pygls.workspace import Document
from .nodes import XmlElement, XmlSyntaxNode
from .types import DocumentType, NodeType
from .utils import convert_document_offset_to_position, convert_document_offsets_to_range
class XmlDocument(XmlSyntaxNode):
"""Represents a parsed XML document.
This is the root of the XML syntax tree.
"""
def __init__(self, document: Document):
super().__init__()
self.document: Document = document
self.supported_document_types: Dict[str, DocumentType] = {
"tool": DocumentType.TOOL,
"macros": DocumentType.MACROS,
}
@property
def node_type(self) -> NodeType:
"""The type of this node."""
return NodeType.DOCUMENT
@property
def is_empty(self) -> bool:
"""True if the document has no root node."""
return not self.root
@property
def root(self) -> Optional[XmlElement]:
"""The root element of the document.
Normally this would be tool, macros or any other supported
kind of element."""
if len(self.children) == 0:
return None
try:
return next(child for child in self.children if type(child) == XmlElement)
except StopIteration:
return None
@property
def uses_macros(self) -> bool:
"""Indicates if this XML document contains any <expand> element.
Returns:
bool: True if the tool contains at least one <expand> elements.
"""
try:
found = findall(self.root, filter_=lambda node: node.name == "expand", mincount=1)
return len(found) > 0
except BaseException:
return False
@property
def document_type(self) -> DocumentType:
"""The type of this document (if it is supported) or UNKNOWN."""
if self.root and self.root.name:
return self.supported_document_types.get(self.root.name, DocumentType.UNKNOWN)
return DocumentType.UNKNOWN
@property
def is_unknown(self) -> bool:
"""Indicates if the document is of unknown type."""
return self.document_type == DocumentType.UNKNOWN
@property
def is_macros_file(self) -> bool:
"""Indicates if the document is a macro definition file."""
return self.document_type == DocumentType.MACROS
def get_node_at(self, offset: int) -> Optional[XmlSyntaxNode]:
"""Gets the syntax node a the given offset."""
return self.root.find_node_at(offset)
def get_element_content_range(self, element: XmlElement) -> Optional[Range]:
"""Gets the Range positions for the given XML element's content in the document.
Args:
element (XmlElement): The XML element to determine it's content range positions.
Returns:
Optional[Range]: The range positions for the content of the given XML element.
"""
start_offset, end_offset = element.get_content_offsets()
if start_offset < 0 or end_offset < 0:
return None
return convert_document_offsets_to_range(self.document, start_offset, end_offset)
def get_position_before(self, element: XmlElement) -> Position:
"""Return the position in the document before the given element.
Args:
element (XmlElement): The element used to find the position.
Returns:
Position: The position just before the element declaration.
"""
return convert_document_offset_to_position(self.document, element.start)
def get_position_after(self, element: XmlElement) -> Position:
"""Return the position in the document after the given element.
Args:
element (XmlElement): The element used to find the position.
Returns:
Position: The position just after the element declaration.
"""
if element.is_self_closed:
return convert_document_offset_to_position(self.document, element.end)
return convert_document_offset_to_position(self.document, element.end_offset)
@staticmethod
def has_valid_root(document: Document) -> bool:
"""Checks if the document's root element matches one of the supported types."""
try:
xml = etree.parse(str(document.path))
root = xml.getroot()
if root is not None and root.tag:
root_tag = root.tag.upper()
supported = [e.name for e in DocumentType if e != DocumentType.UNKNOWN]
return root_tag in supported
return False
except BaseException:
return False
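# Illustrative usage sketch (not part of the original module): the static root check can
# run before a full parse. The file URI below is a placeholder, and populating the
# XmlDocument tree with nodes is done by the language server's XML parser, not shown here.
def _demo_has_valid_root() -> bool:
    from pygls.workspace import Document

    doc = Document(uri="file:///tools/example_tool.xml")  # hypothetical tool wrapper path
    return XmlDocument.has_valid_root(doc)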
| 35.474074 | 94 | 0.648152 |
06ebbe939980df36c7a2a54c0ac52fa27c9f9ce7 | 706 | py | Python | python/repository/file_inventory_repository.py | newrelic-experimental/demo-pythontron | 0561d7e496da3a518c28102010c3c76445a47307 | ["Apache-2.0"] | null | null | null | python/repository/file_inventory_repository.py | newrelic-experimental/demo-pythontron | 0561d7e496da3a518c28102010c3c76445a47307 | ["Apache-2.0"] | null | null | null | python/repository/file_inventory_repository.py | newrelic-experimental/demo-pythontron | 0561d7e496da3a518c28102010c3c76445a47307 | ["Apache-2.0"] | null | null | null |
import json
from repository.i_inventory_repository import IInventoryRepository
class FileInventoryRepository(IInventoryRepository):
def get_records(self):
file_path = "data/inventory.json"
with open(file_path) as json_file:
data = json.load(json_file)
return data
def get_record_by_id(self, key):
data = self.get_records()
item = self._find_by_key(data, "id", key)
if item is not None:
return item
else:
return {}
@classmethod
    def _find_by_key(cls, json_object, key_name, key_value):
for item in json_object:
if item[key_name] == key_value:
return item
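# Illustrative usage sketch (not part of the original module); assumes a
# data/inventory.json file whose records look like {"id": 1, ...}.
if __name__ == "__main__":
    repo = FileInventoryRepository()
    print(repo.get_records())
    print(repo.get_record_by_id(1))  # prints {} when no record has id == 1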
| 27.153846 | 66 | 0.626062 |
581b0e4c8cb25617500ef7045dc9e4e63f9412ae | 11,692 | py | Python | jpylib/pgetopt.py | jyrgenn/jpylib | a4711d11c012ad72f60d7591e7ac2c9e53d3ddd6 | ["BSD-3-Clause"] | null | null | null | jpylib/pgetopt.py | jyrgenn/jpylib | a4711d11c012ad72f60d7591e7ac2c9e53d3ddd6 | ["BSD-3-Clause"] | null | null | null | jpylib/pgetopt.py | jyrgenn/jpylib | a4711d11c012ad72f60d7591e7ac2c9e53d3ddd6 | ["BSD-3-Clause"] | null | null | null |
# Copyright (C) 2020 Juergen Nickelsen <ni@w21.org>, see LICENSE.
"""POSIX-conformant command-line option parser (plus long options)
See the parse() function for details. Build $__package_version$
"""
import os
import sys
import copy
# Exception argument string and additional value (used to generate README)
ErrorNotopt = "unknown option" # option
ErrorArg = "option does not take an argument" # option
ErrorNoarg = "option needs argument" # option
ErrorIntarg = "option argument must be integer" # option
ErrorMinarg = "too few arguments, needs at least" # minimum
ErrorMaxarg = "too many arguments, at most" # maximum
class OptionError(Exception):
pass
class OptionValueContainer:
def __init__(self, descriptors, args):
"""Arguments: dict optchar => descriptor; command-line args.
See the parse() function below for details.
"""
self._opts = copy.deepcopy(descriptors)
_keywords = ("_arguments", "_help_header", "_help_footer",
"_usage", "_program")
for opt, desc in self._opts.items():
if opt.startswith("_"):
assert opt in _keywords, "keyword unknown: " + repr(opt)
# The following is grossly redundant -- it is a kludge to finally
# reach 100% test coverage; apparently impossible if both
# statements are under the same "if". Luckly we don't need to
# optimise for speed. :-(
if opt.startswith("_"):
continue
assert type(opt) == str and len(opt) == 1, \
"option key must be string of length 1: " + repr(opt)
assert type(desc) == tuple and len(desc) in (4, 5), \
"descriptor not sequence len 4 or 5: -" + opt
name, typ, default, *_ = desc
assert isinstance(name, str), "option name is not a string: -" + opt
if type(typ) == type:
assert typ in (bool, int, str), "invalid option type: -" + opt
else:
assert callable(typ), "invalid option type: -"+opt
self.__dict__[name] = default
if "?" not in self._opts:
self._opts["?"] = \
("help", self.ovc_help, None, "show help on options and things")
if "h" not in self._opts:
self._opts["h"] = self._opts["?"]
for field in _keywords:
self.__dict__[field] = self._opts.get(field)
if not self._program:
self._program = os.path.basename(sys.argv[0])
self._long = { v[0].replace("_", "-"): v
for k, v in self._opts.items() if len(k) == 1 }
self._args = args[:]
self._min = self._max = None
if type(self._arguments) == list:
min = max = 0
inf = False
for arg in self._arguments:
if "..." in arg:
inf = True
if arg.startswith("["):
max += len(arg.split(" "))
elif not arg == "...":
min += 1
max += 1
self._min = min
self._max = None if inf else max
self._arguments = " ".join(self._arguments)
def _parse(self):
while self._args and self._args[0].startswith("-"):
arg = self._args.pop(0)
if arg == "--": break
if arg.startswith("--"):
self._have_opt(arg[2:])
else:
arg = arg[1:]
while arg:
arg = self._have_opt(arg[0], arg[1:])
if self._min is not None and len(self._args) < self._min:
raise OptionError(ErrorMinarg, self._min)
if self._max is not None and len(self._args) > self._max:
raise OptionError(ErrorMaxarg, self._max)
def _have_opt(self, opt, arg=None):
value = None
if len(opt) > 1:
parts = opt.split("=", 1)
if len(parts) > 1:
opt, value = parts
desc = self._long.get(opt)
else:
desc = self._opts.get(opt)
if not desc:
raise OptionError(ErrorNotopt, opt)
name, typ, defval, *_ = desc
if typ == bool:
if value:
raise OptionError(ErrorArg, opt)
self.__dict__[name] += 1
else:
if typ not in (str, int):
value = typ()
elif arg:
value = arg
arg = ""
self._set_optarg(opt, desc, value)
return arg
def _set_optarg(self, opt, desc, value):
if value is None:
if not self._args:
raise OptionError(ErrorNoarg, opt)
value = self._args.pop(0)
if desc[1] == int:
try:
value = int(value)
except:
raise OptionError(ErrorIntarg, opt)
if isinstance(getattr(self, desc[0], None), list):
getattr(self, desc[0]).append(value)
else:
setattr(self, desc[0], value)
def ovc_help(self):
"""Print the help message and exit."""
print(self.ovc_help_msg())
sys.exit()
def ovc_help_msg(self):
"""Return a detailed help message."""
msg = self.ovc_usage_msg() + "\n"
if self._help_header:
msg += self._help_header + "\n\n"
for opt in sorted(self._opts.keys()):
if opt.startswith("_"):
continue
desc = self._opts[opt]
arg = ""
if desc[1] in (str, int):
arg = " " + (desc[4] if len(desc) == 5 else "ARG")
msg += " -%s, --%s%s:\n %s" % (
opt, desc[0].replace('_', '-'), arg, desc[3])
if desc[1] in (int, str):
msg += " (%s arg, default %s)" % (
desc[1].__name__, repr(desc[2]))
msg += "\n"
if self._help_footer:
msg += "\n" + self._help_footer
return msg
def ovc_usage(self, error="", exit_status=64):
"""Print usage message (with optional error message) and exit."""
out = sys.stdout if not exit_status else sys.stderr
if error:
print(self._program + ":", error, file=out, end="\n\n")
print(self.ovc_usage_msg(), file=out)
print("use '-?' option to get more help", file=out)
sys.exit(exit_status)
def ovc_usage_msg(self):
"""Return a brief usage message."""
args = ""
if self._arguments is None:
args = " <arguments>"
elif self._arguments:
args = " " + self._arguments
noarg = ""
w_arg = []
for key, desc in self._opts.items():
if len(key) > 1 or key in "h?":
continue
if desc[1] is str:
w_arg.append((key, (desc[4] if len(desc) == 5 else "ARG")))
else:
noarg += key
options = " "
if noarg:
options += "[-" + "".join(sorted(noarg)) + "]"
for opt in w_arg:
options += " [-{} {}]".format(opt[0], opt[1])
return self._usage or "usage: " + self._program + options + args
def ovc_values(self):
"""Return a dict of options and their values (for testing)."""
return { key: val for key, val in self.__dict__.items()
if not key.startswith("_") }
def parse(descriptors, args=sys.argv[1:], exit_on_error=True):
"""Parse the command line options according to the specified descriptors.
Keys of the descriptors dictionary are options or keywords. In case
of an option, the key is the single option character, and the value
is a tuple of four or five fields:
(1) name of the option, used in the returned namespace and as the
name of the corresponding long option name (after replacing
underscores with dashes)
(2) type of the option, which may be bool for options without
arguments (actually counters), or str or int for options with an
argument of the respective type
(3) default value, which can be a starting counter (or False) for
bool options, or an integer or string value for int or str
options, respectively, or a list, to which each option argument
will be appended (for multi-value options)
(4) description of the option for the help text
(5) (optional) name of the option's argument for the help text
(defaults to 'ARG')
A key may also be one of these keywords:
"_arguments": string to print in the usage to describe the
non-option arguments, or, for argument count checking, a sequence
with the argument names:
- a normal string counts as one argument towards minimum and
maximum
- if it contains '...', there is no maximum the number of
arguments
- if it begins with '[', it is optional; if it can be split by
blanks into multiple words, each one counts toward the
maximum; e.g. "[param1 param2 param3]" increases the maximum
by 3, but not the minimum
"_help_footer": string to print with 'help' after the option
explanations
"_help_header": string to print with 'help' before the option
explanations
"_program": string to use as program name for help and usage
message instead of sys.argv[0]
"_usage": string to use as usage message instead of the default
constructed one
If no '?' or 'h' option is specified, they will default to a long
form of '--help' and a 'help' function, which will be called
immediately when the option is seen. It prints a brief summary of
the program's parameters and a description of the options, framed
by the _help_header and the _help_footer; it terminates the program
after printing the message.
In case of a normal return of parse() (i.e. options and number of
arguments okay), it returns an OptionValueContainer and a list of
the remaining command line arguments. Example:
ovc, args = pgetopt.parse({
# opt: (name, type, default value, helptext[, arg name])
"s": ("schmooze", bool, 0, "more schmooziness"),
"o": ("output_file", str, None, "output file (or stdout)", "NAME"),
"n": ("repetitions", int, 3, "number of repetitions"),
"d": ("debug", str, [], "debug topics", "DEBUG_TOPIC"),
# keyword: value
"_arguments": ["string_to_print", "..."],
"_help_header": "print a string a number of times",
"_help_footer": "This is just an example program.",
}
On return, ovc has the following fields:
ovc.schmooze: number of -s options counted,
ovc.output_file: parameter of -o or --output-file, or None
ovc.repetitions: parameter of -n or --repetitions, or 3
ovc.debug: list with all parameters given to -d or --debug
Parameters to int or str options are taken from the next argument;
with long options, "--option=parameter" is also possible.
Other potentially useful fields of ovc:
ovc.ovc_help(): help function
ovc.ovc_usage(): usage function
ovc.ovc_help_msg(),
ovc.ovc_usage_msg(): get corresponding messages as strings
"""
ovc = OptionValueContainer(descriptors, args)
try:
ovc._parse()
return ovc, ovc._args
except OptionError as e:
if exit_on_error:
ovc.ovc_usage(e.args[0] + ": " + repr(e.args[1]))
raise(e)
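# Illustrative usage sketch (not part of the original module): long options and "--"
# end-of-options handling. The option names, arguments, and values are made up.
def _demo_parse():
    ovc, args = parse({
        "v": ("verbose", bool, 0, "increase verbosity"),
        "o": ("output_file", str, None, "write output to NAME", "NAME"),
        "_arguments": ["input", "..."],
    }, args=["--verbose", "-o", "out.txt", "--", "-literal-arg"])
    return ovc.verbose, ovc.output_file, args   # -> (1, "out.txt", ["-literal-arg"])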
# EOF
| 37.354633 | 80 | 0.558844 |
846264b32188cffcf1dc9199d918cd246aa9ecbf | 345 | py | Python | Instagram/urls.py | katono254/Instagram | 7fc129a56c0cdb79d0270d0d5d809db711c7e47d | ["MIT"] | null | null | null | Instagram/urls.py | katono254/Instagram | 7fc129a56c0cdb79d0270d0d5d809db711c7e47d | ["MIT"] | 8 | 2020-06-05T20:52:19.000Z | 2022-03-12T00:15:13.000Z | Instagram/urls.py | katono254/Instagram | 7fc129a56c0cdb79d0270d0d5d809db711c7e47d | ["MIT"] | null | null | null |
from django.contrib import admin
from django.conf.urls import url, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url('admin/', admin.site.urls),
url('', include('Instaclone.urls'))
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 31.363636 | 80 | 0.75942 |
83bffd75389777139f9eb3c5f7f6792a565a891e | 3,548 | py | Python | unified_planning/test/test_python_writer.py | aiplan4eu/unified-planning | d2fd18baa3a2110595e5dfdc3f55254df72c3016 | ["Apache-2.0"] | 9 | 2022-02-18T14:51:58.000Z | 2022-03-31T06:02:43.000Z | unified_planning/test/test_python_writer.py | aiplan4eu/unified-planning | d2fd18baa3a2110595e5dfdc3f55254df72c3016 | ["Apache-2.0"] | 37 | 2022-02-01T10:44:38.000Z | 2022-03-31T09:13:42.000Z | unified_planning/test/test_python_writer.py | aiplan4eu/unified-planning | d2fd18baa3a2110595e5dfdc3f55254df72c3016 | ["Apache-2.0"] | null | null | null |
# Copyright 2021 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from unified_planning.shortcuts import *
from unified_planning.test import TestCase, main
from unified_planning.test.examples import get_example_problems
from unified_planning.io import PythonWriter
class TestPythonWriter(TestCase):
def setUp(self):
TestCase.setUp(self)
self.problems = get_example_problems()
def test_all(self):
for p in self.problems.values():
original_problem = p.problem
pw = PythonWriter(original_problem)
_locals = {}
exec(pw.write_problem_code(), {}, _locals)
self.assertEqual(original_problem, _locals['problem'])
def test_ad_hoc_1(self):
xd = Fluent('x-$')
xe = Fluent('x-&')
nop = Fluent('')
a = InstantaneousAction('3')
a.add_precondition(Not(xd))
a.add_effect(xd, True)
a.add_precondition(Not(xe))
a.add_effect(xe, True)
a.add_precondition(Not(nop))
a.add_effect(nop, True)
problem = Problem('basic')
problem.add_fluent(xd)
problem.add_fluent(xe)
problem.add_fluent(nop)
problem.add_action(a)
problem.set_initial_value(xd, False)
problem.set_initial_value(xe, False)
problem.set_initial_value(nop, False)
problem.add_goal(xd)
problem.add_goal(xe)
problem.add_goal(nop)
pw = PythonWriter(problem)
_locals = {}
exec(pw.write_problem_code(), {}, _locals)
self.assertEqual(problem, _locals['problem'])
def test_ad_hoc_2(self):
Location = UserType('Location')
robot_at = Fluent('robot_at', BoolType(), OrderedDict([('if', Location)]))
battery_charge = Fluent('battery_charge', RealType(0, 100))
move = InstantaneousAction('move', OrderedDict([('from', Location), ('to', Location)]))
l_from = move.parameter('from')
to = move.parameter('to')
move.add_precondition(GE(battery_charge, 10))
move.add_precondition(Not(Equals(l_from, to)))
move.add_precondition(robot_at(l_from))
move.add_precondition(Not(robot_at(to)))
move.add_effect(robot_at(l_from), False)
move.add_effect(robot_at(to), True)
move.add_effect(battery_charge, Minus(battery_charge, 10))
l1 = Object('l1', Location)
l2 = Object('l2', Location)
problem = Problem('robot')
problem.add_fluent(robot_at)
problem.add_fluent(battery_charge)
problem.add_action(move)
problem.add_object(l1)
problem.add_object(l2)
problem.set_initial_value(robot_at(l1), True)
problem.set_initial_value(robot_at(l2), False)
problem.set_initial_value(battery_charge, 100)
problem.add_goal(robot_at(l2))
pw = PythonWriter(problem)
_locals = {}
exec(pw.write_problem_code(), {}, _locals)
self.assertEqual(problem, _locals['problem'])
| 38.565217 | 95 | 0.66009 |
730c8d3ebf7385cd88ac734473cadecf1e56d4ca | 1,236 | py | Python | test codes/test_max.py | DionEngels/MBxPython | 1f1bc7f3be86082a6f3f4dc0eaf162db00061b34 | ["MIT"] | null | null | null | test codes/test_max.py | DionEngels/MBxPython | 1f1bc7f3be86082a6f3f4dc0eaf162db00061b34 | ["MIT"] | null | null | null | test codes/test_max.py | DionEngels/MBxPython | 1f1bc7f3be86082a6f3f4dc0eaf162db00061b34 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 14:09:00 2020
@author: s150127
"""
import numpy as np
import time
def makeGaussian(size, fwhm = 3, center=None):
""" Make a square gaussian kernel.
size is the length of a side of the square
fwhm is full-width-half-maximum, which
can be thought of as an effective radius.
"""
x = np.arange(0, size, 1, float)
y = x[:,np.newaxis]
if center is None:
x0 = y0 = size // 2
else:
x0 = center[0]
y0 = center[1]
return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)
roi = makeGaussian(9, center=[4, 4])
num_loop = 100000
loops = list(range(0, num_loop))
start = time.time()
for loop in loops:
maximum = np.max(roi)
print('Time taken NP ' + str(round(time.time() - start, 3)) + ' s. Loops: ' + str(len(loops)))
start = time.time()
for loop in loops:
maximum2 = np.amax(roi)
print('Time taken NP2: ' + str(round(time.time() - start, 3)) + ' s. Loops: ' + str(len(loops)))
import MBx_FORTRAN_TEST_v3 as fortran
start = time.time()
for loop in loops:
maximum3 = fortran.max9(roi)
print('Time taken FORTRAN: ' + str(round(time.time() - start, 3)) + ' s. Loops: ' + str(len(loops)))
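# Illustrative alternative (not in the original script): timeit gives a steadier
# per-call estimate than the hand-rolled loops above.
import timeit
print('Time taken NP (timeit): ' + str(round(timeit.timeit(lambda: np.max(roi), number=num_loop), 3)) + ' s')
print('Time taken NP2 (timeit): ' + str(round(timeit.timeit(lambda: np.amax(roi), number=num_loop), 3)) + ' s')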
| 20.262295 | 100 | 0.597087 |
e4c7bf250abd1ca01eb9be33d7a3dfea2ccd475e | 21,027 | py | Python | research/object_detection/models/keras_models/resnet_v1.py | gujralsanyam22/models | d96f8f043dbe2b5ca8ea1785f57df8faf68d8875 | ["Apache-2.0"] | 549 | 2020-01-02T05:14:57.000Z | 2022-03-29T18:34:12.000Z | research/object_detection/models/keras_models/resnet_v1.py | yangxl-2014-fe/models | 11ea5237818e791a5717716d5413977f4c4db1e3 | ["Apache-2.0"] | 98 | 2020-01-21T09:41:30.000Z | 2022-03-12T00:53:06.000Z | research/object_detection/models/keras_models/resnet_v1.py | yangxl-2014-fe/models | 11ea5237818e791a5717716d5413977f4c4db1e3 | ["Apache-2.0"] | 233 | 2020-01-18T03:46:27.000Z | 2022-03-19T03:17:47.000Z |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper around the Keras Resnet V1 models for object detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.python.keras.applications import resnet
from object_detection.core import freezable_batch_norm
from object_detection.models.keras_models import model_utils
def _fixed_padding(inputs, kernel_size, rate=1): # pylint: disable=invalid-name
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = tf.pad(
inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs
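# Worked example of the padding arithmetic above (illustrative sketch): for
# kernel_size=3 and rate=2 the effective kernel size is 3 + (3 - 1) * (2 - 1) = 5,
# so pad_total = 4 and pad_beg = pad_end = 2; a [1, 10, 10, C] input therefore
# comes back as [1, 14, 14, C], e.g.
#   _fixed_padding(tf.ones([1, 10, 10, 3]), kernel_size=3, rate=2)  # shape (1, 14, 14, 3)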
class _LayersOverride(object):
"""Alternative Keras layers interface for the Keras Resnet V1."""
def __init__(self,
batchnorm_training,
batchnorm_scale=True,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5,
weight_decay=0.0001,
conv_hyperparams=None,
min_depth=8,
depth_multiplier=1):
"""Alternative tf.keras.layers interface, for use by the Keras Resnet V1.
The class is used by the Keras applications kwargs injection API to
modify the Resnet V1 Keras application with changes required by
the Object Detection API.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale
the activations in the batch normalization layer.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the epsilon.
weight_decay: The weight decay to use for regularizing the model.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default resnet_v1 layer builders.
min_depth: Minimum number of filters in the convolutional layers.
depth_multiplier: The depth multiplier to modify the number of filters
in the convolutional layers.
"""
self._batchnorm_training = batchnorm_training
self._batchnorm_scale = batchnorm_scale
self._default_batchnorm_momentum = default_batchnorm_momentum
self._default_batchnorm_epsilon = default_batchnorm_epsilon
self._conv_hyperparams = conv_hyperparams
self._min_depth = min_depth
self._depth_multiplier = depth_multiplier
self.regularizer = tf.keras.regularizers.l2(weight_decay)
self.initializer = tf.variance_scaling_initializer()
def _FixedPaddingLayer(self, kernel_size, rate=1): # pylint: disable=invalid-name
return tf.keras.layers.Lambda(
lambda x: _fixed_padding(x, kernel_size, rate))
def Conv2D(self, filters, kernel_size, **kwargs): # pylint: disable=invalid-name
"""Builds a Conv2D layer according to the current Object Detection config.
Overrides the Keras Resnet application's convolutions with ones that
follow the spec specified by the Object Detection hyperparameters.
Args:
filters: The number of filters to use for the convolution.
kernel_size: The kernel size to specify the height and width of the 2D
convolution window.
**kwargs: Keyword args specified by the Keras application for
constructing the convolution.
Returns:
A one-arg callable that will either directly apply a Keras Conv2D layer to
the input argument, or that will first pad the input then apply a Conv2D
layer.
"""
# Apply the minimum depth to the convolution layers.
filters = max(int(filters * self._depth_multiplier), self._min_depth)
if self._conv_hyperparams:
kwargs = self._conv_hyperparams.params(**kwargs)
else:
kwargs['kernel_regularizer'] = self.regularizer
kwargs['kernel_initializer'] = self.initializer
# Set use_bias as false to keep it consistent with Slim Resnet model.
kwargs['use_bias'] = False
kwargs['padding'] = 'same'
stride = kwargs.get('strides')
if stride and kernel_size and stride > 1 and kernel_size > 1:
kwargs['padding'] = 'valid'
def padded_conv(features): # pylint: disable=invalid-name
padded_features = self._FixedPaddingLayer(kernel_size)(features)
return tf.keras.layers.Conv2D(
filters, kernel_size, **kwargs)(padded_features)
return padded_conv
else:
return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs)
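# Illustrative note on the branch above: a call such as Conv2D(64, 3, strides=2)
# on a 224x224 feature map first pads it to 226x226 via _FixedPaddingLayer(3)
# and then runs a 'valid' 3x3/stride-2 convolution, giving 112x112 -- matching
# the fixed-padding behaviour of the slim resnet_v1 implementation.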
def Activation(self, *args, **kwargs): # pylint: disable=unused-argument,invalid-name
"""Builds an activation layer.
Overrides the Keras application Activation layer specified by the
Object Detection configuration.
Args:
*args: Ignored,
required to match the `tf.keras.layers.Activation` interface.
**kwargs: Only the name is used,
required to match `tf.keras.layers.Activation` interface.
Returns:
An activation layer specified by the Object Detection hyperparameter
configurations.
"""
name = kwargs.get('name')
if self._conv_hyperparams:
return self._conv_hyperparams.build_activation_layer(name=name)
else:
return tf.keras.layers.Lambda(tf.nn.relu, name=name)
def BatchNormalization(self, **kwargs): # pylint: disable=invalid-name
"""Builds a normalization layer.
Overrides the Keras application batch norm with the norm specified by the
Object Detection configuration.
Args:
**kwargs: Only the name is used, all other params ignored.
Required for matching `layers.BatchNormalization` calls in the Keras
application.
Returns:
A normalization layer specified by the Object Detection hyperparameter
configurations.
"""
name = kwargs.get('name')
if self._conv_hyperparams:
return self._conv_hyperparams.build_batch_norm(
training=self._batchnorm_training,
name=name)
else:
kwargs['scale'] = self._batchnorm_scale
kwargs['epsilon'] = self._default_batchnorm_epsilon
return freezable_batch_norm.FreezableBatchNorm(
training=self._batchnorm_training,
momentum=self._default_batchnorm_momentum,
**kwargs)
def Input(self, shape): # pylint: disable=invalid-name
"""Builds an Input layer.
Overrides the Keras application Input layer with one that uses a
tf.placeholder_with_default instead of a tf.placeholder. This is necessary
to ensure the application works when run on a TPU.
Args:
shape: A tuple of integers representing the shape of the input, which
includes both spatial shape and channels, but not the batch size.
Elements of this tuple can be None; 'None' elements represent dimensions
where the shape is not known.
Returns:
An input layer for the specified shape that internally uses a
placeholder_with_default.
"""
default_size = 224
default_batch_size = 1
shape = list(shape)
default_shape = [default_size if dim is None else dim for dim in shape]
input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape)
placeholder_with_default = tf.placeholder_with_default(
input=input_tensor, shape=[None] + shape)
return model_utils.input_layer(shape, placeholder_with_default)
def MaxPooling2D(self, pool_size, **kwargs): # pylint: disable=invalid-name
"""Builds a MaxPooling2D layer with default padding as 'SAME'.
This is specified by the default resnet arg_scope in slim.
Args:
pool_size: The pool size specified by the Keras application.
**kwargs: Ignored, required to match the Keras applications usage.
Returns:
A MaxPooling2D layer with default padding as 'SAME'.
"""
kwargs['padding'] = 'same'
return tf.keras.layers.MaxPooling2D(pool_size, **kwargs)
# Add alias as Keras also has it.
MaxPool2D = MaxPooling2D # pylint: disable=invalid-name
def ZeroPadding2D(self, padding, **kwargs): # pylint: disable=unused-argument,invalid-name
"""Replaces explicit padding in the Keras application with a no-op.
Args:
padding: The padding values for image height and width.
**kwargs: Ignored, required to match the Keras applications usage.
Returns:
A no-op identity lambda.
"""
return lambda x: x
# Forward all non-overridden methods to the keras layers
def __getattr__(self, item):
return getattr(tf.keras.layers, item)
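# Illustrative note: because of __getattr__ above, any layer the override object
# does not define explicitly resolves to the stock Keras implementation, e.g.
# layers_override.Dense and layers_override.Add are simply tf.keras.layers.Dense
# and tf.keras.layers.Add.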
# pylint: disable=invalid-name
def resnet_v1_50(batchnorm_training,
batchnorm_scale=True,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5,
weight_decay=0.0001,
conv_hyperparams=None,
min_depth=8,
depth_multiplier=1,
**kwargs):
"""Instantiates the Resnet50 architecture, modified for object detection.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale
the activations in the batch normalization layer.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the epsilon.
weight_decay: The weight decay to use for regularizing the model.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default resnet_v1 layer builders.
min_depth: Minimum number of filters in the convolutional layers.
depth_multiplier: The depth multiplier to modify the number of filters
in the convolutional layers.
**kwargs: Keyword arguments forwarded directly to the
`tf.keras.applications.resnet.ResNet50` method that constructs the Keras
model.
Returns:
A Keras ResnetV1-50 model instance.
"""
layers_override = _LayersOverride(
batchnorm_training,
batchnorm_scale=batchnorm_scale,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon,
conv_hyperparams=conv_hyperparams,
weight_decay=weight_decay,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
return tf.keras.applications.resnet.ResNet50(
layers=layers_override, **kwargs)
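# Minimal usage sketch (illustrative; assumes a TF1-compatible environment with
# the object_detection package importable):
#   backbone = resnet_v1_50(batchnorm_training=False, weights=None,
#                           include_top=False)
#   feature_map = backbone(tf.ones([1, 224, 224, 3]))
# The extra keyword arguments (weights, include_top, ...) are passed straight
# through to tf.keras.applications.resnet.ResNet50.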
def resnet_v1_101(batchnorm_training,
batchnorm_scale=True,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5,
weight_decay=0.0001,
conv_hyperparams=None,
min_depth=8,
depth_multiplier=1,
**kwargs):
"""Instantiates the Resnet50 architecture, modified for object detection.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale
the activations in the batch normalization layer.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the epsilon.
weight_decay: The weight decay to use for regularizing the model.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default resnet_v1 layer builders.
min_depth: Minimum number of filters in the convolutional layers.
depth_multiplier: The depth multiplier to modify the number of filters
in the convolutional layers.
**kwargs: Keyword arguments forwarded directly to the
`tf.keras.applications.resnet.ResNet101` method that constructs the Keras
model.
Returns:
A Keras ResnetV1-101 model instance.
"""
layers_override = _LayersOverride(
batchnorm_training,
batchnorm_scale=batchnorm_scale,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon,
conv_hyperparams=conv_hyperparams,
weight_decay=weight_decay,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
return tf.keras.applications.resnet.ResNet101(
layers=layers_override, **kwargs)
def resnet_v1_152(batchnorm_training,
batchnorm_scale=True,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5,
weight_decay=0.0001,
conv_hyperparams=None,
min_depth=8,
depth_multiplier=1,
**kwargs):
"""Instantiates the Resnet50 architecture, modified for object detection.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale
the activations in the batch normalization layer.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the epsilon.
weight_decay: The weight decay to use for regularizing the model.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default resnet_v1 layer builders.
min_depth: Minimum number of filters in the convolutional layers.
depth_multiplier: The depth multiplier to modify the number of filters
in the convolutional layers.
**kwargs: Keyword arguments forwarded directly to the
`tf.keras.applications.resnet.ResNet152` method that constructs the Keras
model.
Returns:
A Keras ResnetV1-152 model instance.
"""
layers_override = _LayersOverride(
batchnorm_training,
batchnorm_scale=batchnorm_scale,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon,
conv_hyperparams=conv_hyperparams,
weight_decay=weight_decay,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
return tf.keras.applications.resnet.ResNet152(
layers=layers_override, **kwargs)
# pylint: enable=invalid-name
# The following codes are based on the existing keras ResNet model pattern:
# google3/third_party/tensorflow/python/keras/applications/resnet.py
def block_basic(x,
filters,
kernel_size=3,
stride=1,
conv_shortcut=False,
name=None):
"""A residual block for ResNet18/34.
Arguments:
x: input tensor.
filters: integer, filters of the convolution layers in the block.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
conv_shortcut: default False, use convolution shortcut if True, otherwise
identity shortcut.
name: string, block label.
Returns:
Output tensor for the residual block.
"""
layers = tf.keras.layers
bn_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1
preact = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_preact_bn')(
x)
preact = layers.Activation('relu', name=name + '_preact_relu')(preact)
if conv_shortcut:
shortcut = layers.Conv2D(
filters, 1, strides=1, name=name + '_0_conv')(
preact)
else:
shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x
x = layers.ZeroPadding2D(
padding=((1, 1), (1, 1)), name=name + '_1_pad')(
preact)
x = layers.Conv2D(
filters, kernel_size, strides=1, use_bias=False, name=name + '_1_conv')(
x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(
x)
x = layers.Activation('relu', name=name + '_1_relu')(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
x = layers.Conv2D(
filters,
kernel_size,
strides=stride,
use_bias=False,
name=name + '_2_conv')(
x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(
x)
x = layers.Activation('relu', name=name + '_2_relu')(x)
x = layers.Add(name=name + '_out')([shortcut, x])
return x
def stack_basic(x, filters, blocks, stride1=2, name=None):
"""A set of stacked residual blocks for ResNet18/34.
Arguments:
x: input tensor.
filters: integer, filters of the convolution layers in a block.
blocks: integer, blocks in the stacked blocks.
stride1: default 2, stride of the first layer in the first block.
name: string, stack label.
Returns:
Output tensor for the stacked blocks.
"""
x = block_basic(x, filters, conv_shortcut=True, name=name + '_block1')
for i in range(2, blocks):
x = block_basic(x, filters, name=name + '_block' + str(i))
x = block_basic(
x, filters, stride=stride1, name=name + '_block' + str(blocks))
return x
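# Illustrative note on the stacking pattern above: the first block of each stack
# uses a convolution shortcut (so the channel count can change), the middle
# blocks keep identity shortcuts, and the spatial stride is applied only in the
# last block of the stack, following the pre-activation style stacking used by
# tf.keras.applications.resnet internally.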
def resnet_v1_18(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the ResNet18 architecture."""
def stack_fn(x):
x = stack_basic(x, 64, 2, stride1=1, name='conv2')
x = stack_basic(x, 128, 2, name='conv3')
x = stack_basic(x, 256, 2, name='conv4')
return stack_basic(x, 512, 2, name='conv5')
return resnet.ResNet(
stack_fn,
True,
True,
'resnet18',
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation)
def resnet_v1_34(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the ResNet34 architecture."""
def stack_fn(x):
x = stack_basic(x, 64, 3, stride1=1, name='conv2')
x = stack_basic(x, 128, 4, name='conv3')
x = stack_basic(x, 256, 6, name='conv4')
return stack_basic(x, 512, 3, name='conv5')
return resnet.ResNet(
stack_fn,
True,
True,
'resnet34',
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation)
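# Minimal usage sketch for the basic-block variants above (illustrative;
# weights=None is assumed here since no bundled 'imagenet' weights exist for
# these depths):
#   model = resnet_v1_18(include_top=False, weights=None,
#                        input_shape=(224, 224, 3))
#   feats = model(tf.ones([1, 224, 224, 3]))   # roughly a (1, 7, 7, 512) tensor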
| 38.795203 | 93 | 0.695677 |
2771a224720368e4526ad13fc919de48c612780e | 1,105 | py | Python | src/adafruit_blinka/microcontroller/nxp_imx8m/pin.py | not7cd/Adafruit_Blinka | 6db3142005428289cbe97b317f5a05506b3b0ef7 | [
"MIT"
] | null | null | null | src/adafruit_blinka/microcontroller/nxp_imx8m/pin.py | not7cd/Adafruit_Blinka | 6db3142005428289cbe97b317f5a05506b3b0ef7 | [
"MIT"
] | null | null | null | src/adafruit_blinka/microcontroller/nxp_imx8m/pin.py | not7cd/Adafruit_Blinka | 6db3142005428289cbe97b317f5a05506b3b0ef7 | [
"MIT"
] | 1 | 2020-09-03T16:21:38.000Z | 2020-09-03T16:21:38.000Z | """NXP IMX8M pin names"""
from adafruit_blinka.microcontroller.generic_linux.libgpiod_pin import Pin
I2C2_SCL = Pin(144) # GPIO5_IO16
I2C2_SDA = Pin(145) # GPIO5_IO17
I2C3_SCL = Pin(146) # GPIO5_IO18
I2C3_SDA = Pin(147) # GPIO5_IO19
PWM1 = Pin((0, 1)) # GPIO1_IO01
PWM2 = Pin((0, 13)) # GPIO1_IO13
PWM3 = Pin((0, 14)) # GPIO1_IO14
GPIO6 = Pin((0, 6)) # GPIO1_IO6
GPIO7 = Pin((0, 7)) # GPIO1_IO7
GPIO8 = Pin((0, 8)) # GPIO1_IO8
GPIO73 = Pin((2, 9)) # GPIO3_IO9
GPIO77 = Pin((2, 13)) # GPIO3_IO13
GPIO138 = Pin((4, 10)) # GPIO5_IO10
GPIO141 = Pin((4, 13)) # GPIO5_IO13
ECSPI1_MISO = Pin(136) # GPIO5_IO8
ECSPI1_MOSI = Pin(135) # GPIO5_IO7
ECSPI1_SCLK = Pin(134) # GPIO5_IO6
ECSPI1_SS0 = Pin(133) # GPIO5_IO9
i2cPorts = (
(1, I2C2_SCL, I2C2_SDA),
(2, I2C3_SCL, I2C3_SDA),
)
# ordered as spiId, sckId, mosiId, misoId
spiPorts = ((32766, ECSPI1_SCLK, ECSPI1_MOSI, ECSPI1_MISO),)
# SysFS pwm outputs, pwm channel and pin in first tuple
pwmOuts = (
((0, 0), PWM1),
((1, 0), PWM2),
((2, 0), PWM3),
)
# UART1_TXD/RXD on /dev/ttymxc0
# UART3_TXD/RXD not available (?)
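# Illustrative note: consumers unpack the tuples defined above positionally, e.g.
#   port_id, sck, mosi, miso = spiPorts[0]   # 32766, ECSPI1_SCLK, ECSPI1_MOSI, ECSPI1_MISO
#   (chip, channel), pin = pwmOuts[0]        # sysfs pwmchip 0, channel 0 -> PWM1
#   bus_id, scl, sda = i2cPorts[0]           # bus 1 -> I2C2_SCL / I2C2_SDA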
| 25.113636 | 74 | 0.653394 |
40a6b849baa3d6157474b3fbb32815cc8922b2da | 63,514 | py | Python | Allura/allura/tests/functional/test_admin.py | rohankumardubey/allura | 9c490a051ca912d28b81ce656441d6fed100cb24 | [
"Apache-2.0"
] | null | null | null | Allura/allura/tests/functional/test_admin.py | rohankumardubey/allura | 9c490a051ca912d28b81ce656441d6fed100cb24 | [
"Apache-2.0"
] | 1 | 2021-10-17T11:32:56.000Z | 2021-10-17T11:32:56.000Z | Allura/allura/tests/functional/test_admin.py | rohankumardubey/allura | 9c490a051ca912d28b81ce656441d6fed100cb24 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import os
from datetime import datetime
import pkg_resources
from io import BytesIO
import logging
from io import open
import tg
import PIL
from alluratest.tools import assert_equals, assert_in, assert_not_in, assert_is_not_none, assert_greater
from ming.orm.ormsession import ThreadLocalORMSession
from tg import expose
from tg import tmpl_context as c, app_globals as g
import mock
import six
import allura
from allura.tests import TestController
from allura.tests import decorators as td
from allura.tests.decorators import audits, out_audits
from alluratest.controller import TestRestApiBase, setup_trove_categories
from allura import model as M
from allura.app import SitemapEntry
from allura.lib.plugin import AdminExtension
from allura.lib import helpers as h
from allura.ext.admin.admin_main import AdminApp
from forgewiki.wiki_main import ForgeWikiApp
log = logging.getLogger(__name__)
class TestProjectAdmin(TestController):
def test_admin_controller(self):
self.app.get('/admin/')
with audits(
'change summary to Milkshakes are for crazy monkeys',
'change project name to My Test Project',
r'change short description to (\u00bf A Test Project \?){45}'):
r = self.app.post('/admin/update', params=dict(
name='My Test Project',
shortname='test',
summary='Milkshakes are for crazy monkeys',
short_description=('\u00bf A Test Project ?' * 45).encode('utf-8'),
labels='aaa,bbb'))
assert r.status_int == 302, (r.status, r.html.select('.error,.fielderror'))
r = self.app.get('/admin/overview')
assert b'A Test Project ?\xc2\xbf A' in r.body
assert 'Test Subproject' not in r
assert 'Milkshakes are for crazy monkeys' in r
sidebar = r.html.find(id='sidebar')
assert sidebar.find('a', href='/p/test/admin/overview'), sidebar
# Add a subproject
with audits('create subproject test-subproject'):
self.app.post('/admin/update_mounts', params={
'new.install': 'install',
'new.ep_name': '',
'new.ordinal': '1',
'new.mount_point': 'test-subproject',
'new.mount_label': 'Test Subproject'})
r = self.app.get('/admin/overview')
assert 'Test Subproject' in r
# Rename a subproject
with audits('update subproject test/test-subproject'):
self.app.post('/admin/update_mounts', params={
'subproject-0.shortname': 'test/test-subproject',
'subproject-0.name': 'Tst Sbprj',
'subproject-0.ordinal': '100',
})
r = self.app.get('/admin/overview')
assert 'Tst Sbprj' in r
# Remove a subproject
with audits('delete subproject test/test-subproject'):
self.app.post('/admin/update_mounts', params={
'subproject-0.delete': 'on',
'subproject-0.shortname': 'test/test-subproject',
'new.ep_name': '',
})
# Add a tool
with audits('install tool test-tool'):
r = self.app.post('/admin/update_mounts', params={
'new.install': 'install',
'new.ep_name': 'Wiki',
'new.ordinal': '1',
'new.mount_point': 'test-tool',
'new.mount_label': 'Test Tool'})
assert 'error' not in self.webflash(r)
# check tool in the nav
r = self.app.get('/p/test/test-tool/').follow()
active_link = r.html.findAll('li', {'class': 'selected'})
assert_equals(len(active_link), 1)
assert active_link[0].contents[1]['href'] == '/p/test/test-tool/'
with audits('install tool test-tool2'):
r = self.app.post('/admin/update_mounts', params={
'new.install': 'install',
'new.ep_name': 'Wiki',
'new.ordinal': '1',
'new.mount_point': 'test-tool2',
'new.mount_label': 'Test Tool2'})
assert 'error' not in self.webflash(r)
# check the nav - tools of same type are grouped
r = self.app.get('/p/test/test-tool/Home/')
active_link = r.html.findAll('li', {'class': 'selected'})
assert len(active_link) == 2
assert active_link[0].contents[1]['href'] == '/p/test/_list/wiki'
assert r.html.findAll('a', {'href': '/p/test/test-tool2/'})
assert r.html.findAll('a', {'href': '/p/test/test-tool/'})
# check can't create dup tool
r = self.app.post('/admin/update_mounts', params={
'new.install': 'install',
'new.ep_name': 'Wiki',
'new.ordinal': '1',
'new.mount_point': 'test-tool',
'new.mount_label': 'Test Tool'})
assert 'error' in self.webflash(r)
# Rename a tool
with audits('update tool test-tool'):
self.app.post('/admin/update_mounts', params={
'tool-0.mount_point': 'test-tool',
'tool-0.mount_label': 'Tst Tuul',
'tool-0.ordinal': '200',
})
r = self.app.get('/admin/overview')
assert 'Tst Tuul' in r
# Remove a tool
with audits('uninstall tool test-tool'):
self.app.post('/admin/update_mounts', params={
'tool-0.delete': 'on',
'tool-0.mount_point': 'test-tool',
'new.ep_name': '',
})
# Check the audit log
r = self.app.get('/admin/audit/')
assert "uninstall tool test-tool" in r.text, r.text
# Make sure several 'project_menu_updated' events got sent
menu_updated_events = M.MonQTask.query.find({
'task_name': 'allura.tasks.event_tasks.event',
'args': 'project_menu_updated'
}).all()
assert_equals(len(menu_updated_events), 7)
def test_features(self):
proj = M.Project.query.get(shortname='test')
assert_equals(proj.features, [])
with audits(r"change project features to \[{u}'One', {u}'Two'\]".format(u='u' if six.PY2 else '')):
resp = self.app.post('/admin/update', params={
'features-0.feature': 'One',
'features-1.feature': ' ',
'features-2.feature': ' Two '})
if resp.status_int == 200:
errors = resp.html.findAll('', attrs={'class': 'fielderror'})
assert_equals([], errors)
errors = resp.html.findAll('', attrs={'class': 'error'})
assert_equals([], errors)
raise AssertionError('Should be a 301 not 200 response')
r = self.app.get('/admin/overview')
features = r.html.find('div', {'id': 'features'})
features = features.findAll('input', {'type': 'text'})
# two features + extra empty input + stub hidden input for js
assert_equals(len(features), 2+1+1)
assert_equals(features[0]['value'], 'One')
assert_equals(features[1]['value'], 'Two')
proj = M.Project.query.get(shortname='test')
assert_equals(proj.features, ['One', 'Two'])
@td.with_wiki
def test_block_user_empty_data(self):
r = self.app.post('/admin/wiki/block_user',
params={'username': '', 'perm': '', 'reason': ''})
assert_equals(r.json, dict(error='Enter username'))
@td.with_wiki
def test_unblock_user_empty_data(self):
r = self.app.post('/admin/wiki/unblock_user',
params={'user_id': '', 'perm': ''})
assert_equals(r.json, dict(error='Select user to unblock'))
@td.with_wiki
def test_block_user(self):
r = self.app.get('/admin/wiki/permissions')
assert '<input type="checkbox" name="user_id"' not in r
user = M.User.by_username('test-admin')
r = self.app.post('/admin/wiki/block_user',
params={'username': 'test-admin', 'perm': 'read', 'reason': 'Comment'})
assert_equals(
r.json, dict(user_id=str(user._id), username='test-admin', reason='Comment'))
user = M.User.by_username('test-admin')
admin_role = M.ProjectRole.by_user(user)
app = M.Project.query.get(shortname='test').app_instance('wiki')
ace = M.ACL.contains(M.ACE.deny(admin_role._id, 'read'), app.acl)
assert_equals(ace.reason, 'Comment')
r = self.app.get('/admin/wiki/permissions')
assert '<input type="checkbox" name="user_id" value="%s">test-admin (Comment)' % user._id in r
@td.with_wiki
def test_unblock_user(self):
r = self.app.post('/admin/wiki/block_user',
params={'username': 'test-admin', 'perm': 'read'})
user = M.User.by_username('test-admin')
admin_role = M.ProjectRole.by_user(user)
app = M.Project.query.get(shortname='test').app_instance('wiki')
ace = M.ACE.deny(admin_role._id, 'read')
r = self.app.get('/admin/wiki/permissions')
assert '<input type="checkbox" name="user_id" value="%s">test-admin' % user._id in r
app = M.Project.query.get(shortname='test').app_instance('wiki')
assert M.ACL.contains(ace, app.acl) is not None
r = self.app.post('/admin/wiki/unblock_user',
params={'user_id': str(user._id), 'perm': 'read'})
assert_equals(r.json, dict(unblocked=[str(user._id)]))
assert M.ACL.contains(ace, app.acl) is None
r = self.app.get('/admin/wiki/permissions')
assert '<input type="checkbox" name="user_id"' not in r
@td.with_wiki
def test_block_unblock_multiple_users(self):
self.app.post('/admin/wiki/block_user',
params={'username': 'test-admin', 'perm': 'read', 'reason': 'Spammer'})
self.app.post('/admin/wiki/block_user',
params={'username': 'test-user', 'perm': 'read'})
admin = M.User.by_username('test-admin')
user = M.User.by_username('test-user')
admin_role = M.ProjectRole.by_user(admin)
user_role = M.ProjectRole.by_user(user)
app = M.Project.query.get(shortname='test').app_instance('wiki')
deny_admin = M.ACE.deny(admin_role._id, 'read')
deny_user = M.ACE.deny(user_role._id, 'read')
assert M.ACL.contains(deny_admin, app.acl) is not None
assert M.ACL.contains(deny_user, app.acl) is not None
r = self.app.get('/admin/wiki/permissions')
assert '<input type="checkbox" name="user_id" value="%s">test-admin (Spammer)' % admin._id in r
assert '<input type="checkbox" name="user_id" value="%s">test-user' % user._id in r
self.app.post('/admin/wiki/unblock_user',
params={'user_id': str(user._id), 'perm': 'read'})
self.app.post('/admin/wiki/unblock_user',
params={'user_id': str(admin._id), 'perm': 'read'})
app = M.Project.query.get(shortname='test').app_instance('wiki')
assert M.ACL.contains(deny_admin, app.acl) is None
assert M.ACL.contains(deny_user, app.acl) is None
r = self.app.get('/admin/wiki/permissions')
assert '<input type="checkbox" name="user_id"' not in r
@td.with_wiki
def test_blocked_users_remains_after_saving_all_permissions(self):
self.app.post('/admin/wiki/block_user',
params={'username': 'test-user', 'perm': 'read', 'reason': 'Comment'})
self.app.post('/admin/wiki/block_user',
params={'username': 'test-user', 'perm': 'post', 'reason': 'Comment'})
user = M.User.by_username('test-user')
user_role = M.ProjectRole.by_user(user)
app = M.Project.query.get(shortname='test').app_instance('wiki')
assert M.ACL.contains(M.ACE.deny(user_role._id, 'read'), app.acl)
assert M.ACL.contains(M.ACE.deny(user_role._id, 'post'), app.acl)
old_acl = app.acl
permissions_page = self.app.get('/admin/wiki/permissions')
permissions_page.forms[0].submit()
# deny ACEs for user still should be there
app = M.Project.query.get(shortname='test').app_instance('wiki')
assert M.ACL.contains(M.ACE.deny(user_role._id, 'read'), app.acl)
assert M.ACL.contains(M.ACE.deny(user_role._id, 'post'), app.acl)
# ...and all old ACEs also
for ace in old_acl:
assert_in(ace, app.acl)
def test_tool_permissions(self):
BUILTIN_APPS = ['activity', 'blog', 'discussion', 'git', 'link',
'shorturl', 'svn', 'tickets', 'userstats', 'wiki']
self.app.get('/admin/')
project = M.Project.query.get(shortname='test')
for i, ep in enumerate(pkg_resources.iter_entry_points('allura')):
App = ep.load()
tool = ep.name
cfg = M.AppConfig(
project_id=project._id,
tool_name=tool,
options={'mount_point': '', 'mount_label': ''})
app = App(project, cfg)
if not app.installable or ep.name.lower() not in BUILTIN_APPS:
continue
with audits('install tool test-%d' % i):
self.app.post('/admin/update_mounts', params={
'new.install': 'install',
'new.ep_name': tool,
'new.ordinal': str(i),
'new.mount_point': 'test-%d' % i,
'new.mount_label': tool})
r = self.app.get('/admin/test-%d/permissions' % i)
cards = [
tag for tag in r.html.findAll('input')
if (
tag.get('type') == 'hidden' and
tag.get('name') and
tag['name'].startswith('card-') and
tag['name'].endswith('.id'))]
assert len(cards) == len(app.permissions), cards
def test_tool_installation_limit(self):
with mock.patch.object(ForgeWikiApp, 'max_instances') as mi:
mi.__get__ = mock.Mock(return_value=1)
c.user = M.User.query.get(username='root')
c.project = M.Project.query.get(shortname='test')
data = c.project.nav_data(admin_options=True)
menu = [tool['text'] for tool in data['installable_tools']]
assert_in('Wiki', menu)
r = self.app.post('/p/test/admin/update_mounts/', params={
'new.install': 'install',
'new.ep_name': 'Wiki',
'new.ordinal': '1',
'new.mount_point': 'wiki',
'new.mount_label': 'Wiki'})
c.project = M.Project.query.get(shortname='test')
data = c.project.nav_data(admin_options=True)
menu = [tool['text'] for tool in data['installable_tools']]
assert_not_in('Wiki', menu)
r = self.app.post('/p/test/admin/update_mounts/', params={
'new.install': 'install',
'new.ep_name': 'Wiki',
'new.ordinal': '1',
'new.mount_point': 'wiki2',
'new.mount_label': 'Wiki 2'})
assert 'error' in self.webflash(r)
assert 'limit exceeded' in self.webflash(r)
def test_install_tool_form(self):
r = self.app.get('/admin/install_tool?tool_name=wiki')
assert_in('Installing Wiki', r)
def test_install_tool_form_options(self):
opts = ['AllowEmailPosting']
with mock.patch.object(ForgeWikiApp, 'config_on_install', new=opts):
r = self.app.get('/admin/install_tool?tool_name=wiki')
assert_in('<input id="AllowEmailPosting" name="AllowEmailPosting"', r)
def test_install_tool_form_subproject(self):
r = self.app.get('/admin/install_tool?tool_name=subproject')
assert_in('Installing Sub Project', r)
def test_project_icon(self):
file_name = 'neo-icon-set-454545-256x350.png'
file_path = os.path.join(
allura.__path__[0], 'nf', 'allura', 'images', file_name)
file_data = open(file_path, 'rb').read()
upload = ('icon', file_name, file_data)
self.app.get('/admin/')
with audits('update project icon'):
self.app.post('/admin/update', params=dict(
name='Test Project',
shortname='test',
short_description='A Test Project'),
upload_files=[upload])
r = self.app.get('/p/test/icon')
image = PIL.Image.open(BytesIO(r.body))
assert image.size == (48, 48)
r = self.app.get('/p/test/icon?foo=bar')
r = self.app.get('/p/test/icon?w=96')
image = PIL.Image.open(BytesIO(r.body))
assert image.size == (96, 96)
r = self.app.get('/p/test/icon?w=12345', status=404)
def test_project_screenshot(self):
file_name = 'neo-icon-set-454545-256x350.png'
file_path = os.path.join(
allura.__path__[0], 'nf', 'allura', 'images', file_name)
file_data = open(file_path, 'rb').read()
upload = ('screenshot', file_name, file_data)
self.app.get('/admin/')
with audits('add screenshot'):
self.app.post('/admin/add_screenshot', params=dict(
caption='test me'),
upload_files=[upload])
p_nbhd = M.Neighborhood.query.get(name='Projects')
project = M.Project.query.get(
shortname='test', neighborhood_id=p_nbhd._id)
filename = project.get_screenshots()[0].filename
r = self.app.get('/p/test/screenshot/' + filename)
uploaded = PIL.Image.open(file_path)
screenshot = PIL.Image.open(BytesIO(r.body))
assert uploaded.size == screenshot.size
r = self.app.get('/p/test/screenshot/' + filename + '/thumb')
thumb = PIL.Image.open(BytesIO(r.body))
assert thumb.size == (150, 150)
# FIX: home pages don't currently support screenshots (now that they're a wiki);
# reinstate this code (or appropriate) when we have a macro for that
# r = self.app.get('/p/test/home/')
# assert '/p/test/screenshot/'+filename in r
# assert 'test me' in r
# test edit
req = self.app.get('/admin/screenshots')
req.forms[0]['caption'].value = 'aaa'
req.forms[0].submit()
# r = self.app.get('/p/test/home/')
# assert 'aaa' in r
# test delete
req = self.app.get('/admin/screenshots')
req.forms[1].submit()
# r = self.app.get('/p/test/home/')
# assert 'aaa' not in r
def test_sort_screenshots(self):
for file_name in ('admin_24.png', 'admin_32.png'):
file_path = os.path.join(allura.__path__[0], 'nf', 'allura',
'images', file_name)
file_data = open(file_path, 'rb').read()
upload = ('screenshot', file_name, file_data)
self.app.post('/admin/add_screenshot', params=dict(
caption=file_name),
upload_files=[upload])
p_nbhd = M.Neighborhood.query.get(name='Projects')
project = M.Project.query.get(shortname='test',
neighborhood_id=p_nbhd._id)
# first uploaded is first by default
screenshots = project.get_screenshots()
assert_equals(screenshots[0].filename, 'admin_24.png')
# reverse order
params = dict((str(ss._id), str(len(screenshots) - 1 - i))
for i, ss in enumerate(screenshots))
self.app.post('/admin/sort_screenshots', params)
assert_equals(project.get_screenshots()[0].filename, 'admin_32.png')
def test_project_delete_undelete(self):
# create a subproject
with audits('create subproject sub-del-undel'):
self.app.post('/admin/update_mounts', params={
'new.install': 'install',
'new.ep_name': '',
'new.ordinal': '1',
'new.mount_point': 'sub-del-undel',
'new.mount_label': 'sub-del-undel'})
r = self.app.get('/p/test/admin/overview')
assert 'This project has been deleted and is not visible to non-admin users' not in r
assert r.html.find('input', {'name': 'removal', 'value': ''}).has_attr('checked')
assert not r.html.find('input', {'name': 'removal', 'value': 'deleted'}).has_attr('checked')
with audits('delete project'):
self.app.post('/admin/update', params=dict(
name='Test Project',
shortname='test',
removal='deleted',
short_description='A Test Project',
delete='on'))
r = self.app.get('/p/test/admin/overview')
assert 'This project has been deleted and is not visible to non-admin users' in r
assert not r.html.find('input', {'name': 'removal', 'value': ''}).has_attr('checked')
assert r.html.find('input', {'name': 'removal', 'value': 'deleted'}).has_attr('checked')
# make sure subprojects get deleted too
r = self.app.get('/p/test/sub-del-undel/admin/overview')
assert 'This project has been deleted and is not visible to non-admin users' in r
with audits('undelete project'):
self.app.post('/admin/update', params=dict(
name='Test Project',
shortname='test',
removal='',
short_description='A Test Project',
undelete='on'))
r = self.app.get('/p/test/admin/overview')
assert 'This project has been deleted and is not visible to non-admin users' not in r
assert r.html.find('input', {'name': 'removal', 'value': ''}).has_attr('checked')
assert not r.html.find('input', {'name': 'removal', 'value': 'deleted'}).has_attr('checked')
def test_project_delete_not_allowed(self):
# turn off project delete option
from allura.ext.admin.admin_main import config
old_allow_project_delete = config.get('allow_project_delete', ())
config['allow_project_delete'] = False
try:
# create a subproject
with audits('create subproject sub-no-del'):
self.app.post('/admin/update_mounts', params={
'new.install': 'install',
'new.ep_name': '',
'new.ordinal': '1',
'new.mount_point': 'sub-no-del',
'new.mount_label': 'sub-no-del'})
# root project doesn't have delete option
r = self.app.get('/p/test/admin/overview')
assert not r.html.find('input', {'name': 'removal', 'value': 'deleted'})
# subprojects can still be deleted
r = self.app.get('/p/test/sub-no-del/admin/overview')
assert r.html.find('input', {'name': 'removal', 'value': 'deleted'})
# attempt to delete root project won't do anything
self.app.post('/admin/update', params=dict(
name='Test Project',
shortname='test',
removal='deleted',
short_description='A Test Project',
delete='on'))
r = self.app.get('/p/test/admin/overview')
assert 'This project has been deleted and is not visible to non-admin users' not in r
# make sure subproject delete works
with audits(
'change project removal status to deleted',
'delete project'):
self.app.post('/p/test/sub-no-del/admin/update', params=dict(
name='sub1',
shortname='sub1',
removal='deleted',
short_description='A Test Project',
delete='on'))
r = self.app.get('/p/test/sub-no-del/admin/overview')
assert 'This project has been deleted and is not visible to non-admin users' in r
assert r.html.find('input', {'name': 'removal', 'value': 'deleted'}).has_attr('checked')
finally:
if old_allow_project_delete == ():
del config['allow_project_delete']
else:
config['allow_project_delete'] = old_allow_project_delete
def test_add_remove_trove_cat(self):
setup_trove_categories()
r = self.app.get('/admin/trove')
assert 'No Database Environment categories have been selected.' in r
assert '<span class="trove_fullpath">Database API</span>' not in r
# add a cat
with audits('add trove root_database: Database Environment :: Database API'):
form = r.forms['add_trove_root_database']
form['new_trove'].value = '506'
r = form.submit().follow()
# make sure it worked
assert 'No Database Environment categories have been selected.' not in r
assert '<span class="trove_fullpath">Database API » Python Database API</span>' in r
# delete the cat
with audits('remove trove root_database: Database Environment :: Database API'):
r = r.forms['delete_trove_root_database_506'].submit().follow()
# make sure it worked
assert 'No Database Environment categories have been selected.' in r
assert '<span class="trove_fullpath">Database API » Python Database API</span>' not in r
def test_add_remove_label(self):
setup_trove_categories()
r = self.app.get('/admin/trove')
form = r.forms['label_edit_form']
form['labels'].value = 'foo,bar,baz'
with audits('updated labels'):
r = form.submit()
r = r.follow()
p_nbhd = M.Neighborhood.query.get(name='Projects')
p = M.Project.query.get(shortname='test', neighborhood_id=p_nbhd._id)
assert p.labels == ['foo', 'bar', 'baz']
assert form['labels'].value == 'foo,bar,baz'
ThreadLocalORMSession.close_all()
form['labels'].value = 'asdf'
with audits('updated labels'):
r = form.submit()
r = r.follow()
p = M.Project.query.get(shortname='test', neighborhood_id=p_nbhd._id)
assert_equals(p.labels, ['asdf'])
assert form['labels'].value == 'asdf'
@td.with_wiki
def test_log_permission(self):
r = self.app.get('/admin/wiki/permissions')
select = r.html.find('select', {'name': 'card-0.new'})
opt_admin = select.find(text='Admin').parent
opt_developer = select.find(text='Developer').parent
assert opt_admin.name == 'option'
assert opt_developer.name == 'option'
with audits('updated "admin" permission: "Admin" => "Admin, Developer" for wiki'):
self.app.post('/admin/wiki/update', params={
'card-0.new': opt_developer['value'],
'card-0.value': opt_admin['value'],
'card-0.id': 'admin'})
with audits('updated "admin" permission: "Admin, Developer" => "Admin" for wiki'):
self.app.post('/admin/wiki/update', params={
'card-0.value': opt_admin['value'],
'card-0.id': 'admin'})
def test_project_permissions(self):
r = self.app.get('/admin/permissions/', status=302)
assert_in('/admin/groups', r.location)
def test_subproject_permissions(self):
with audits('create subproject test-subproject'):
self.app.post('/admin/update_mounts', params={
'new.install': 'install',
'new.ep_name': '',
'new.ordinal': '1',
'new.mount_point': 'test-subproject',
'new.mount_label': 'Test Subproject'})
r = self.app.get('/test-subproject/admin/permissions/')
assert len(r.html.findAll('input', {'name': 'card-0.value'})) == 0
select = r.html.find('select', {'name': 'card-0.new'})
opt_admin = select.find(text='Admin').parent
opt_developer = select.find(text='Developer').parent
assert opt_admin.name == 'option'
assert opt_developer.name == 'option'
with audits('updated "admin" permissions: "" => "Admin,Developer"'):
r = self.app.post('/test-subproject/admin/permissions/update', params={
'card-0.new': opt_developer['value'],
'card-0.value': opt_admin['value'],
'card-0.id': 'admin'})
r = self.app.get('/test-subproject/admin/permissions/')
assigned_ids = [t['value']
for t in r.html.findAll('input', {'name': 'card-0.value'})]
assert len(assigned_ids) == 2
assert opt_developer['value'] in assigned_ids
assert opt_admin['value'] in assigned_ids
def test_project_groups(self):
r = self.app.get('/admin/groups/')
dev_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[2]
developer_id = dev_holder['data-group']
with audits('add user test-user to Developer'):
r = self.app.post('/admin/groups/add_user', params={
'role_id': developer_id,
'username': 'test-user'})
r = self.app.get('/admin/groups/')
dev_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[2]
users = dev_holder.find('ul', {'class': 'users'}).findAll(
'li', {'class': 'deleter'})
assert 'test-user' in users[0]['data-user']
def test_new_admin_subscriptions(self):
"""Newly added admin must be subscribed to all the tools in the project"""
r = self.app.get('/admin/groups/')
admin_holder = r.html.find(
'table', {'id': 'usergroup_admin'}).findAll('tr')[1]
admin_id = admin_holder['data-group']
with audits('add user test-user to Admin'):
self.app.post('/admin/groups/add_user', params={
'role_id': admin_id,
'username': 'test-user'})
p_nbhd = M.Neighborhood.query.get(name='Projects')
p = M.Project.query.get(shortname='test', neighborhood_id=p_nbhd._id)
uid = M.User.by_username('test-user')._id
for ac in p.app_configs:
sub = M.Mailbox.subscribed(
user_id=uid, project_id=p._id, app_config_id=ac._id)
assert sub, 'New admin not subscribed to app %s' % ac
def test_new_user_subscriptions(self):
"""Newly added user must not be subscribed to all the tools in the project if he is not admin"""
r = self.app.get('/admin/groups/')
dev_holder = r.html.find(
'table', {'id': 'usergroup_admin'}).findAll('tr')[2]
developer_id = dev_holder['data-group']
with audits('add user test-user to Developer'):
self.app.post('/admin/groups/add_user', params={
'role_id': developer_id,
'username': 'test-user'})
p_nbhd = M.Neighborhood.query.get(name='Projects')
p = M.Project.query.get(shortname='test', neighborhood_id=p_nbhd._id)
uid = M.User.by_username('test-user')._id
for ac in p.app_configs:
sub = M.Mailbox.subscribed(
user_id=uid, project_id=p._id, app_config_id=ac._id)
assert not sub, 'New user subscribed to app %s' % ac
def test_subroles(self):
"""Make sure subroles are preserved during group updates."""
def check_roles(r):
dev_holder = r.html.find(
'table', {'id': 'usergroup_admin'}).findAll('tr')[2]
mem_holder = r.html.find(
'table', {'id': 'usergroup_admin'}).findAll('tr')[3]
assert 'All users in Admin group' in dev_holder.text
assert 'All users in Developer group' in mem_holder.text
r = self.app.get('/admin/groups/')
admin_holder = r.html.find(
'table', {'id': 'usergroup_admin'}).findAll('tr')[1]
admin_id = admin_holder['data-group']
# test that subroles are intact after user added
with audits('add user test-user to Admin'):
r = self.app.post('/admin/groups/add_user', params={
'role_id': admin_id,
'username': 'test-user'})
r = self.app.get('/admin/groups/')
check_roles(r)
# test that subroles are intact after user deleted
with audits('remove user test-user from Admin'):
r = self.app.post('/admin/groups/remove_user', params={
'role_id': admin_id,
'username': 'test-user'})
r = self.app.get('/admin/groups/')
check_roles(r)
def test_cannot_remove_all_admins(self):
"""Must always have at least one user with the Admin role (and anon
doesn't count)."""
r = self.app.get('/admin/groups/')
admin_holder = r.html.find(
'table', {'id': 'usergroup_admin'}).findAll('tr')[1]
admin_id = admin_holder['data-group']
users = admin_holder.find('ul', {'class': 'users'}).findAll(
'li', {'class': 'deleter'})
assert len(users) == 1
r = self.app.post('/admin/groups/remove_user', params={
'role_id': admin_id,
'username': 'admin1'})
assert r.json[
'error'] == 'You must have at least one user with the Admin role.'
r = self.app.get('/admin/groups/')
admin_holder = r.html.find(
'table', {'id': 'usergroup_admin'}).findAll('tr')[1]
users = admin_holder.find('ul', {'class': 'users'}).findAll(
'li', {'class': 'deleter'})
assert len(users) == 1
def test_cannot_add_anon_to_group(self):
r = self.app.get('/admin/groups/')
dev_holder = r.html.find(
'table', {'id': 'usergroup_admin'}).findAll('tr')[2]
developer_id = dev_holder['data-group']
r = self.app.post('/admin/groups/add_user', params={
'role_id': developer_id,
'username': ''})
assert r.json['error'] == 'You must choose a user to add.'
r = self.app.get('/admin/groups/')
dev_holder = r.html.find(
'table', {'id': 'usergroup_admin'}).findAll('tr')[2]
users = dev_holder.find('ul', {'class': 'users'}).findAll(
'li', {'class': 'deleter'})
# no user was added
assert len(users) == 0
assert M.ProjectRole.query.find(dict(
name='*anonymous', user_id=None,
roles={'$ne': []})).count() == 0
def test_project_multi_groups(self):
r = self.app.get('/admin/groups/')
admin_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[1]
admin_id = admin_holder['data-group']
with audits('add user test-user to Admin'):
r = self.app.post('/admin/groups/add_user', params={
'role_id': admin_id,
'username': 'test-user'})
assert 'error' not in r.json
r = self.app.post('/admin/groups/add_user', params={
'role_id': admin_id,
'username': 'test-user'})
assert r.json['error'] == 'Test User (test-user) is already in the group Admin.'
r = self.app.get('/admin/groups/')
assert 'test-user' in str(r), r.showbrowser()
with audits('remove user test-user from Admin'):
r = self.app.post('/admin/groups/remove_user', params={
'role_id': admin_id,
'username': 'test-user'})
r = self.app.get('/admin/groups/')
assert 'test-user' not in str(r), r.showbrowser()
@td.with_wiki
def test_new_group(self):
r = self.app.get('/admin/groups/new', validate_chunk=True)
with audits('create group Developer'):
r = self.app.post('/admin/groups/create',
params={'name': 'Developer'})
assert 'error' in self.webflash(r)
with audits('create group RoleNew1'):
r = self.app.post('/admin/groups/create',
params={'name': 'RoleNew1'})
r = self.app.get('/admin/groups/')
role_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[4]
assert 'RoleNew1' in role_holder.text
role_id = role_holder['data-group']
# add test-user to role
role_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[4]
with audits('add user test-user to RoleNew1'):
r = self.app.post('/admin/groups/add_user', params={
'role_id': role_id,
'username': 'test-user'})
with audits('delete group RoleNew1'):
r = self.app.post('/admin/groups/delete_group', params={
'group_name': 'RoleNew1'})
assert 'deleted' in self.webflash(r)
r = self.app.get('/admin/groups/', status=200)
roles = [t.text for t in r.html.findAll('td', {'class': 'group'})]
assert 'RoleNew1' not in roles
# make sure can still access homepage after one of user's roles were
# deleted
r = self.app.get('/p/test/wiki/',
extra_environ=dict(username=str('test-user'))).follow()
assert r.status == '200 OK'
def test_change_perms(self):
r = self.app.get('/admin/groups/')
dev_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[2]
mem_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[3]
mem_id = mem_holder['data-group']
# neither group has create permission
assert dev_holder.select_one('li[data-permission=create]')['class'] == ["no"]
assert mem_holder.select_one('li[data-permission=create]')['class'] == ["no"]
# add create permission to Member
r = self.app.post('/admin/groups/change_perm', params={
'role_id': mem_id,
'permission': 'create',
'allow': 'true'})
r = self.app.get('/admin/groups/')
dev_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[2]
mem_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[3]
# Member now has create permission
assert mem_holder.select_one('li[data-permission=create]')['class'] == ["yes"]
# Developer has inherited create permission from Member
assert dev_holder.select_one('li[data-permission=create]')['class'] == ["inherit"]
# remove update permission from Member
r = self.app.post('/admin/groups/change_perm', params={
'role_id': mem_id,
'permission': 'create',
'allow': 'false'})
r = self.app.get('/admin/groups/')
dev_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[2]
mem_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[3]
# neither group has create permission
assert dev_holder.select_one('li[data-permission=create]')['class'] == ["no"]
assert mem_holder.select_one('li[data-permission=create]')['class'] == ["no"]
def test_permission_inherit(self):
r = self.app.get('/admin/groups/')
admin_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[1]
admin_id = admin_holder['data-group']
mem_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[3]
mem_id = mem_holder['data-group']
anon_holder = r.html.find('table', {'id': 'usergroup_admin'}).findAll('tr')[5]
anon_id = anon_holder['data-group']
# first remove create from Admin so we can see it inherit
r = self.app.post('/admin/groups/change_perm', params={
'role_id': admin_id,
'permission': 'create',
'allow': 'false'})
# updates to anon inherit up
r = self.app.post('/admin/groups/change_perm', params={
'role_id': anon_id,
'permission': 'create',
'allow': 'true'})
assert {'text': 'Inherited permission create from Anonymous',
'has': 'inherit', 'name': 'create'} in r.json[admin_id]
assert {'text': 'Inherited permission create from Anonymous',
'has': 'inherit', 'name': 'create'} in r.json[mem_id]
assert {'text': 'Has permission create', 'has':
'yes', 'name': 'create'} in r.json[anon_id]
r = self.app.post('/admin/groups/change_perm', params={
'role_id': anon_id,
'permission': 'create',
'allow': 'false'})
assert {'text': 'Does not have permission create',
'has': 'no', 'name': 'create'} in r.json[admin_id]
assert {'text': 'Does not have permission create',
'has': 'no', 'name': 'create'} in r.json[mem_id]
assert {'text': 'Does not have permission create',
'has': 'no', 'name': 'create'} in r.json[anon_id]
# updates to Member inherit up
r = self.app.post('/admin/groups/change_perm', params={
'role_id': mem_id,
'permission': 'create',
'allow': 'true'})
assert {'text': 'Inherited permission create from Member',
'has': 'inherit', 'name': 'create'} in r.json[admin_id]
assert {'text': 'Has permission create', 'has':
'yes', 'name': 'create'} in r.json[mem_id]
assert {'text': 'Does not have permission create',
'has': 'no', 'name': 'create'} in r.json[anon_id]
r = self.app.post('/admin/groups/change_perm', params={
'role_id': mem_id,
'permission': 'create',
'allow': 'false'})
assert {'text': 'Does not have permission create',
'has': 'no', 'name': 'create'} in r.json[admin_id]
assert {'text': 'Does not have permission create',
'has': 'no', 'name': 'create'} in r.json[mem_id]
assert {'text': 'Does not have permission create',
'has': 'no', 'name': 'create'} in r.json[anon_id]
def test_admin_extension_sidebar(self):
class FooSettingsController(object):
@expose()
def index(self, *a, **kw):
return 'here the foo settings go'
class FooSettingsExtension(AdminExtension):
def update_project_sidebar_menu(self, sidebar_links):
base_url = c.project.url() + 'admin/ext/'
sidebar_links.append(
SitemapEntry('Foo Settings', base_url + 'foo'))
@property
def project_admin_controllers(self):
return {
'foo': FooSettingsController,
}
eps = {
'admin': {
'foo-settings': FooSettingsExtension,
}
}
with mock.patch.dict(g.entry_points, eps):
main_page = self.app.get('/admin/')
foo_page = main_page.click(description='Foo Settings')
url = foo_page.request.path
assert url.endswith('/admin/ext/foo'), url
assert_equals('here the foo settings go', foo_page.text)
def test_nbhd_invitations(self):
r = self.app.get('/admin/invitations')
r.mustcontain('Neighborhood Invitation(s) for test')
class TestExport(TestController):
def setUp(self):
super(TestExport, self).setUp()
self.setup_with_tools()
@td.with_wiki
@td.with_tool('test', 'Wiki', 'wiki2', 'Wiki2')
def setup_with_tools(self):
pass
def test_exportable_tools_for(self):
project = M.Project.query.get(shortname='test')
exportable_tools = AdminApp.exportable_tools_for(project)
exportable_mount_points = [
t.options.mount_point for t in exportable_tools]
assert_equals(exportable_mount_points, ['admin', 'wiki', 'wiki2'])
def test_access(self):
r = self.app.get('/admin/export',
extra_environ={'username': str('*anonymous')}).follow()
assert_equals(r.request.url,
'http://localhost/auth/?return_to=%2Fadmin%2Fexport')
self.app.get('/admin/export',
extra_environ={'username': str('test-user')},
status=403)
r = self.app.post('/admin/export',
extra_environ={'username': str('*anonymous')}).follow()
assert_equals(r.request.url, 'http://localhost/auth/')
self.app.post('/admin/export',
extra_environ={'username': str('test-user')},
status=403)
def test_ini_option(self):
tg.config['bulk_export_enabled'] = 'false'
r = self.app.get('/admin/')
assert_not_in('Export', r)
r = self.app.get('/admin/export', status=404)
tg.config['bulk_export_enabled'] = 'true'
r = self.app.get('/admin/')
assert_in('Export', r)
@mock.patch('allura.model.session.project_doc_session')
def test_export_page_contains_exportable_tools(self, session):
session.return_value = {'result': [{"total_size": 10000}]}
r = self.app.get('/admin/export')
assert_in('Wiki</label> <a href="/p/test/wiki/">/p/test/wiki/</a>', r)
assert_in(
'Wiki2</label> <a href="/p/test/wiki2/">/p/test/wiki2/</a>', r)
assert_not_in(
'Search</label> <a href="/p/test/search/">/p/test/search/</a>', r)
def test_export_page_contains_hidden_tools(self):
with mock.patch('allura.ext.search.search_main.SearchApp.exportable'):
project = M.Project.query.get(shortname='test')
exportable_tools = AdminApp.exportable_tools_for(project)
exportable_mount_points = [
t.options.mount_point for t in exportable_tools]
assert_equals(exportable_mount_points,
['admin', 'search', 'wiki', 'wiki2'])
def test_tools_not_selected(self):
r = self.app.post('/admin/export')
assert_in('error', self.webflash(r))
def test_bad_tool(self):
r = self.app.post('/admin/export', {'tools': 'search'})
assert_in('error', self.webflash(r))
@mock.patch('allura.ext.admin.admin_main.export_tasks')
@mock.patch.dict(tg.config, {'bulk_export_filename': '{project}.zip'})
def test_selected_one_tool(self, export_tasks):
r = self.app.post('/admin/export', {'tools': 'wiki'})
assert_in('ok', self.webflash(r))
export_tasks.bulk_export.post.assert_called_once_with(
['wiki'], 'test.zip', send_email=True, with_attachments=False)
@mock.patch('allura.ext.admin.admin_main.export_tasks')
@mock.patch.dict(tg.config, {'bulk_export_filename': '{project}.zip'})
def test_selected_multiple_tools(self, export_tasks):
r = self.app.post('/admin/export', {'tools': ['wiki', 'wiki2']})
assert_in('ok', self.webflash(r))
export_tasks.bulk_export.post.assert_called_once_with(
['wiki', 'wiki2'], 'test.zip', send_email=True, with_attachments=False)
@mock.patch('allura.model.session.project_doc_session')
def test_export_in_progress(self, session):
from allura.tasks import export_tasks
session.return_value = {'result': [{"total_size": 10000}]}
export_tasks.bulk_export.post(['wiki'])
r = self.app.get('/admin/export')
assert_in('<h2>Busy</h2>', r.text)
@td.with_user_project('test-user')
def test_bulk_export_path_for_user_project(self):
project = M.Project.query.get(shortname='u/test-user')
assert_equals(project.bulk_export_path(tg.config['bulk_export_path']),
'/tmp/bulk_export/u/test-user')
@td.with_user_project('test-user')
def test_bulk_export_filename_for_user_project(self):
project = M.Project.query.get(shortname='u/test-user')
filename = project.bulk_export_filename()
assert filename.startswith('test-user-backup-{}-'.format(datetime.utcnow().year))
assert filename.endswith('.zip')
def test_bulk_export_filename_for_nbhd(self):
project = M.Project.query.get(name='Home Project for Projects')
filename = project.bulk_export_filename()
assert filename.startswith('p-backup-{}-'.format(datetime.utcnow().year))
assert filename.endswith('.zip')
def test_bulk_export_path_for_nbhd(self):
project = M.Project.query.get(name='Home Project for Projects')
assert_equals(project.bulk_export_path(tg.config['bulk_export_path']), '/tmp/bulk_export/p/p')
@mock.patch('allura.model.session.project_doc_session')
def test_export_page_contains_check_all_checkbox(self, session):
session.return_value = {'result': [{"total_size": 10000}]}
r = self.app.get('/admin/export')
assert_in('<input type="checkbox" id="check-all">', r)
assert_in('Check All</label>', r)
class TestRestExport(TestRestApiBase):
@mock.patch('allura.model.project.MonQTask')
def test_export_status(self, MonQTask):
MonQTask.query.get.return_value = None
r = self.api_get('/rest/p/test/admin/export_status')
assert_equals(r.json, {'status': 'ready'})
MonQTask.query.get.return_value = 'something'
r = self.api_get('/rest/p/test/admin/export_status')
assert_equals(r.json, {'status': 'busy'})
@mock.patch('allura.model.project.MonQTask')
@mock.patch('allura.ext.admin.admin_main.AdminApp.exportable_tools_for')
@mock.patch('allura.ext.admin.admin_main.export_tasks.bulk_export')
def test_export_no_exportable_tools(self, bulk_export, exportable_tools, MonQTask):
MonQTask.query.get.return_value = None
exportable_tools.return_value = []
self.api_post('/rest/p/test/admin/export',
tools='tickets, discussion', status=400)
assert_equals(bulk_export.post.call_count, 0)
@mock.patch('allura.model.project.MonQTask')
@mock.patch('allura.ext.admin.admin_main.AdminApp.exportable_tools_for')
@mock.patch('allura.ext.admin.admin_main.export_tasks.bulk_export')
def test_export_no_tools_specified(self, bulk_export, exportable_tools, MonQTask):
MonQTask.query.get.return_value = None
exportable_tools.return_value = [
mock.Mock(options=mock.Mock(mount_point='tickets')),
mock.Mock(options=mock.Mock(mount_point='discussion')),
]
self.api_post('/rest/p/test/admin/export', status=400)
assert_equals(bulk_export.post.call_count, 0)
@mock.patch('allura.model.project.MonQTask')
@mock.patch('allura.ext.admin.admin_main.AdminApp.exportable_tools_for')
@mock.patch('allura.ext.admin.admin_main.export_tasks.bulk_export')
def test_export_busy(self, bulk_export, exportable_tools, MonQTask):
MonQTask.query.get.return_value = 'something'
exportable_tools.return_value = [
mock.Mock(options=mock.Mock(mount_point='tickets')),
mock.Mock(options=mock.Mock(mount_point='discussion')),
]
self.api_post('/rest/p/test/admin/export',
tools='tickets, discussion', status=503)
assert_equals(bulk_export.post.call_count, 0)
@mock.patch('allura.model.project.MonQTask')
@mock.patch('allura.ext.admin.admin_main.AdminApp.exportable_tools_for')
@mock.patch('allura.ext.admin.admin_main.export_tasks.bulk_export')
@mock.patch.dict(tg.config, {'bulk_export_filename': '{project}.zip'})
def test_export_ok(self, bulk_export, exportable_tools, MonQTask):
MonQTask.query.get.return_value = None
exportable_tools.return_value = [
mock.Mock(options=mock.Mock(mount_point='tickets')),
mock.Mock(options=mock.Mock(mount_point='discussion')),
]
r = self.api_post('/rest/p/test/admin/export',
tools='tickets, discussion', status=200)
assert_equals(r.json, {
'filename': 'test.zip',
'status': 'in progress',
})
bulk_export.post.assert_called_once_with(
['tickets', 'discussion'], 'test.zip', send_email=False, with_attachments=False)
class TestRestInstallTool(TestRestApiBase):
def test_missing_mount_info(self):
r = self.api_get('/rest/p/test/')
tools_names = [t['name'] for t in r.json['tools']]
assert 'tickets' not in tools_names
data = {
'tool': 'tickets'
}
r = self.api_post('/rest/p/test/admin/install_tool/', **data)
assert_equals(r.json['success'], False)
assert_equals(r.json['info'], 'All arguments required.')
def test_invalid_tool(self):
r = self.api_get('/rest/p/test/')
tools_names = [t['name'] for t in r.json['tools']]
assert 'tickets' not in tools_names
data = {
'tool': 'something',
'mount_point': 'ticketsmount1',
'mount_label': 'tickets_label1'
}
r = self.api_post('/rest/p/test/admin/install_tool/', **data)
assert_equals(r.json['success'], False)
assert_equals(r.json['info'],
'Incorrect tool name, or limit is reached.')
def test_bad_mount(self):
r = self.api_get('/rest/p/test/')
tools_names = [t['name'] for t in r.json['tools']]
assert 'tickets' not in tools_names
data = {
'tool': 'tickets',
'mount_point': 'tickets_mount1',
'mount_label': 'tickets_label1'
}
r = self.api_post('/rest/p/test/admin/install_tool/', **data)
assert_equals(r.json['success'], False)
assert_equals(r.json['info'],
'Mount point "tickets_mount1" is invalid')
def test_install_tool_ok(self):
r = self.api_get('/rest/p/test/')
tools_names = [t['name'] for t in r.json['tools']]
assert 'tickets' not in tools_names
data = {
'tool': 'tickets',
'mount_point': 'ticketsmount1',
'mount_label': 'tickets_label1'
}
r = self.api_post('/rest/p/test/admin/install_tool/', **data)
assert_equals(r.json['success'], True)
assert_equals(r.json['info'],
'Tool %s with mount_point %s and mount_label %s was created.'
% ('tickets', 'ticketsmount1', 'tickets_label1'))
project = M.Project.query.get(shortname='test')
assert_equals(project.ordered_mounts()
[-1]['ac'].options.mount_point, 'ticketsmount1')
audit_log = M.AuditLog.query.find(
{'project_id': project._id}).sort('_id', -1).first()
assert_equals(audit_log.message, 'install tool ticketsmount1')
def test_tool_exists(self):
with mock.patch.object(ForgeWikiApp, 'max_instances') as mi:
mi.__get__ = mock.Mock(return_value=2)
r = self.api_get('/rest/p/test/')
tools_names = [t['name'] for t in r.json['tools']]
assert 'wiki' not in tools_names
data = {
'tool': 'wiki',
'mount_point': 'wikimount1',
'mount_label': 'wiki_label1'
}
project = M.Project.query.get(shortname='test')
with h.push_config(c, user=M.User.query.get()):
project.install_app('wiki', mount_point=data['mount_point'])
r = self.api_post('/rest/p/test/admin/install_tool/', **data)
assert_equals(r.json['success'], False)
assert_equals(r.json['info'], 'Mount point already exists.')
def test_tool_installation_limit(self):
with mock.patch.object(ForgeWikiApp, 'max_instances') as mi:
mi.__get__ = mock.Mock(return_value=1)
r = self.api_get('/rest/p/test/')
tools_names = [t['name'] for t in r.json['tools']]
assert 'wiki' not in tools_names
data = {
'tool': 'wiki',
'mount_point': 'wikimount',
'mount_label': 'wiki_label'
}
r = self.api_post('/rest/p/test/admin/install_tool/', **data)
assert_equals(r.json['success'], True)
data['mount_point'] = 'wikimount1'
data['mount_label'] = 'wiki_label1'
r = self.api_post('/rest/p/test/admin/install_tool/', **data)
assert_equals(r.json['success'], False)
assert_equals(r.json['info'],
'Incorrect tool name, or limit is reached.')
def test_unauthorized(self):
r = self.api_get('/rest/p/test/')
tools_names = [t['name'] for t in r.json['tools']]
assert 'tickets' not in tools_names
data = {
'tool': 'wiki',
'mount_point': 'wikimount1',
'mount_label': 'wiki_label1'
}
r = self.app.post('/rest/p/test/admin/install_tool/',
extra_environ={'username': str('*anonymous')},
status=401,
params=data)
assert_equals(r.status, '401 Unauthorized')
def test_order(self):
def get_labels():
project = M.Project.query.get(shortname='test')
labels = []
for mount in project.ordered_mounts(include_hidden=True):
if 'ac' in mount:
labels.append(mount['ac'].options.mount_label)
elif 'sub' in mount:
labels.append(mount['sub'].name)
return labels
assert_equals(get_labels(),
['Admin', 'Search', 'Activity', 'A Subproject'])
data = [
{
'tool': 'tickets',
'mount_point': 'ticketsmount1',
'mount_label': 'ta',
},
{
'tool': 'tickets',
'mount_point': 'ticketsmount2',
'mount_label': 'tc',
'order': 'last'
},
{
'tool': 'tickets',
'mount_point': 'ticketsmount3',
'mount_label': 'tb',
'order': 'alpha_tool'
},
{
'tool': 'tickets',
'mount_point': 'ticketsmount4',
'mount_label': 't1',
'order': 'first'
},
]
for datum in data:
r = self.api_post('/rest/p/test/admin/install_tool/', **datum)
assert_equals(r.json['success'], True)
assert_equals(r.json['info'],
'Tool %s with mount_point %s and mount_label %s was created.'
% (datum['tool'], datum['mount_point'], datum['mount_label']))
assert_equals(
get_labels(), ['t1', 'Admin', 'Search', 'Activity', 'A Subproject', 'ta', 'tb', 'tc'])
class TestRestAdminOptions(TestRestApiBase):
def test_no_mount_point(self):
r = self.api_get('/rest/p/test/admin/admin_options/', status=400)
assert_in('Must provide a mount point', r.text)
def test_invalid_mount_point(self):
r = self.api_get('/rest/p/test/admin/admin_options/?mount_point=asdf', status=400)
assert_in('The mount point you provided was invalid', r.text)
@td.with_tool('test', 'Git', 'git')
def test_valid_mount_point(self):
r = self.api_get('/rest/p/test/admin/admin_options/?mount_point=git', status=200)
assert_is_not_none(r.json['options'])
class TestRestMountOrder(TestRestApiBase):
def test_no_kw(self):
r = self.api_post('/rest/p/test/admin/mount_order/', status=400)
assert_in('Expected kw params in the form of "ordinal: mount_point"', r.text)
def test_invalid_kw(self):
data = {'1': 'git', 'two': 'admin'}
r = self.api_post('/rest/p/test/admin/mount_order/', status=400, **data)
assert_in('Invalid kw: expected "ordinal: mount_point"', r.text)
@td.with_wiki
def test_reorder(self):
d1 = {
'0': 'sub1',
'1': 'wiki',
'2': 'admin'
}
d2 = {
'0': 'wiki',
'1': 'sub1',
'2': 'admin'
}
tool = {
'icon': 'tool-admin',
'is_anchored': False,
'mount_point': 'sub1',
'name': 'A Subproject',
'tool_name': 'sub',
'url': '/p/test/sub1/'
}
# Set initial order to d1
r = self.api_post('/rest/p/test/admin/mount_order/', **d1)
assert_equals(r.json['status'], 'ok')
# Get index of sub1
a = self.api_get('/p/test/_nav.json').json['menu'].index(tool)
# Set order to d2
r = self.api_post('/rest/p/test/admin/mount_order/', **d2)
assert_equals(r.json['status'], 'ok')
# Get index of sub1 after reordering
b = self.api_get('/p/test/_nav.json').json['menu'].index(tool)
assert_greater(b, a)
class TestRestToolGrouping(TestRestApiBase):
def test_invalid_grouping_threshold(self):
for invalid_value in ('100', 'asdf'):
r = self.api_post('/rest/p/test/admin/configure_tool_grouping/', grouping_threshold=invalid_value,
status=400)
assert_in('Invalid threshold. Expected a value between 1 and 10', r.text)
@td.with_wiki
@td.with_tool('test', 'Wiki', 'wiki2')
@td.with_tool('test', 'Wiki', 'wiki3')
def test_valid_grouping_threshold(self):
# Set threshold to 2
r = self.api_post('/rest/p/test/admin/configure_tool_grouping/', grouping_threshold='2', status=200)
# The 'wiki' mount_point should not exist at the top level
result1 = self.app.get('/p/test/_nav.json')
assert_not_in('wiki', [tool['mount_point'] for tool in result1.json['menu']])
# Set threshold to 3
r = self.api_post('/rest/p/test/admin/configure_tool_grouping/', grouping_threshold='3', status=200)
# The wiki mount_point should now be at the top level of the menu
result2 = self.app.get('/p/test/_nav.json')
assert_in('wiki', [tool['mount_point'] for tool in result2.json['menu']])
class TestInstallableTools(TestRestApiBase):
def test_installable_tools_response(self):
r = self.api_get('/rest/p/test/admin/installable_tools', status=200)
assert_in('External Link', [tool['tool_label'] for tool in r.json['tools']])
| 45.109375 | 110 | 0.578849 |
efa7613ee373901018494fbd09230fa4f98ca8a5 | 4,656 | py | Python | vitrage-4.3.1/vitrage/datasources/doctor/driver.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | ["Apache-2.0"] | null | null | null | vitrage-4.3.1/vitrage/datasources/doctor/driver.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | ["Apache-2.0"] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | vitrage-4.3.1/vitrage/datasources/doctor/driver.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | ["Apache-2.0"] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z |
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
from oslo_log import log
from vitrage.common.constants import DatasourceAction
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import EventProperties as EventProps
from vitrage.datasources.alarm_driver_base import AlarmDriverBase
from vitrage.datasources.doctor import DOCTOR_DATASOURCE
from vitrage.datasources.doctor.properties import DoctorDetails
from vitrage.datasources.doctor.properties import DoctorProperties \
as DoctorProps
from vitrage.datasources.doctor.properties import DoctorStatus
from vitrage.datasources.doctor.properties import get_detail
LOG = log.getLogger(__name__)
class DoctorDriver(AlarmDriverBase):
AlarmKey = namedtuple('AlarmKey', ['alarm_name', 'hostname'])
def __init__(self, conf):
super(DoctorDriver, self).__init__()
self.conf = conf
self._client = None
def _vitrage_type(self):
return DOCTOR_DATASOURCE
def _alarm_key(self, alarm):
return self.AlarmKey(alarm_name=alarm[EventProps.TYPE],
hostname=get_detail(alarm,
DoctorDetails.HOSTNAME))
def _is_erroneous(self, alarm):
return alarm and \
get_detail(alarm, DoctorDetails.STATUS) != DoctorStatus.UP
def _is_valid(self, alarm):
if not alarm or EventProps.TIME not in alarm or \
EventProps.TYPE not in alarm or \
EventProps.DETAILS not in alarm:
return False
details = alarm[EventProps.DETAILS]
return DoctorDetails.STATUS in details and \
DoctorDetails.HOSTNAME in details
def _status_changed(self, new_alarm, old_alarm):
return get_detail(old_alarm, DoctorDetails.STATUS) != \
get_detail(new_alarm, DoctorDetails.STATUS)
def get_all(self, datasource_action):
# pulling alarms is not supported in Doctor monitor
return self.make_pickleable([], DOCTOR_DATASOURCE, datasource_action)
def get_changes(self, datasource_action):
# pulling alarms is not supported in Doctor monitor
return self.make_pickleable([], DOCTOR_DATASOURCE, datasource_action)
def enrich_event(self, event, event_type):
"""Enrich the given event
:param event: dictionary of this form:
{
'time': '2016-04-12T08:00:00.12345',
'type': 'compute.host.down',
'details': {
'hostname': 'compute-1',
'source': 'sample_monitor',
'cause': 'link-down',
'severity': 'critical',
'status': 'down',
'monitor_id': 'monitor-1',
'monitor_event_id': '123',
}
}
:param event_type: always 'compute.host.down'
:return: the same event, with the following changes:
- DoctorProps.UPDATE_TIME - the event 'time' if it is new, or the
update time of the same event if it is already cached
"""
LOG.debug('Going to enrich event: %s', event)
event[DSProps.EVENT_TYPE] = event[EventProps.TYPE]
old_alarm = self._old_alarm(event)
if old_alarm and not self._status_changed(old_alarm, event):
event[DoctorProps.UPDATE_TIME] = old_alarm[DoctorProps.UPDATE_TIME]
else:
event[DoctorProps.UPDATE_TIME] = event[EventProps.TIME]
event = self._filter_and_cache_alarm(event, old_alarm,
self._filter_get_erroneous,
event[EventProps.TIME])
LOG.debug('Enriched event: %s', event)
if event:
return DoctorDriver.make_pickleable([event], DOCTOR_DATASOURCE,
DatasourceAction.UPDATE)[0]
@staticmethod
def get_event_types():
return [DoctorProps.CUSTOM_EVENT_DOWN,
DoctorProps.CUSTOM_EVENT_UP]
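# Minimal sketch of the UPDATE_TIME rule documented in enrich_event() above,
# using plain dicts in the documented event shape rather than the Vitrage
# cache machinery; the helper name and the 'update_time' key are hypothetical
# and exist only for illustration.
def _example_update_time(new_event, cached_alarm):
    new_status = new_event['details']['status']
    if cached_alarm and cached_alarm['details']['status'] == new_status:
        # Same alarm with an unchanged status: keep the earlier update time.
        return cached_alarm['update_time']
    # New alarm, or the status changed: the event's own time becomes the
    # update time.
    return new_event['time']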
| 38.163934 | 79 | 0.643256 |
3f2ca313b5eaba90b4257f98c1445ed337ce4034 | 581 | py | Python | leetcode/1155.py | Cannizza-zzk/python_review | 5a04b3dbc8baa835780c039386529e20e69af81c | ["Apache-2.0"] | null | null | null | leetcode/1155.py | Cannizza-zzk/python_review | 5a04b3dbc8baa835780c039386529e20e69af81c | ["Apache-2.0"] | null | null | null | leetcode/1155.py | Cannizza-zzk/python_review | 5a04b3dbc8baa835780c039386529e20e69af81c | ["Apache-2.0"] | null | null | null |
class Solution:
def __init__(self) -> None:
self.mod = 10**9 + 7
self.dp ={}
def numRollsToTarget(self, d: int, f: int, target: int) -> int:
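        # Memoized count of the ways to roll `d` dice with faces 1..f that
        # sum to `target`: ways(d, t) = sum of ways(d - 1, i) for
        # i in [max(0, t - f), t - 1], since the last die contributes t - i,
        # which must lie in 1..f.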
if (d, target) in self.dp:
return self.dp[(d,target)]
elif d == 0:
self.dp[(d,target)] = 0 if target > 0 else 1
return self.dp[(d,target)]
else:
ans = 0
for i in range(max(0,target -f),target):
ans += self.numRollsToTarget(d - 1,f,i)
self.dp[(d,target)] = ans
            return ans % self.mod
| 32.277778 | 67 | 0.475043 |
d9b5391e0868649be571b14f411f1c335980a180 | 1,787 | py | Python | ocdskingfisherprocess/cli/commands/transform_collections.py | aguilerapy/kingfisher-process | 0c588076a47ecef99b195bb546903ed8125bf06a | ["BSD-3-Clause"] | null | null | null | ocdskingfisherprocess/cli/commands/transform_collections.py | aguilerapy/kingfisher-process | 0c588076a47ecef99b195bb546903ed8125bf06a | ["BSD-3-Clause"] | 1 | 2019-04-22T21:17:07.000Z | 2019-04-22T21:17:07.000Z | ocdskingfisherprocess/cli/commands/transform_collections.py | aguilerapy/kingfisher-process | 0c588076a47ecef99b195bb546903ed8125bf06a | ["BSD-3-Clause"] | null | null | null |
import ocdskingfisherprocess.database
import ocdskingfisherprocess.cli.commands.base
from ocdskingfisherprocess.transform.util import get_transform_instance
import datetime
from threading import Timer
import os
class TransformCollectionsCLICommand(ocdskingfisherprocess.cli.commands.base.CLICommand):
command = 'transform-collections'
def configure_subparser(self, subparser):
subparser.add_argument("--runforseconds",
help="Run for this many seconds only.")
def run_command(self, args):
run_until_timestamp = None
run_for_seconds = int(args.runforseconds) if args.runforseconds else 0
if run_for_seconds > 0:
run_until_timestamp = datetime.datetime.utcnow().timestamp() + run_for_seconds
# This is a safeguard - the process should stop itself but this will kill it if it does not.
def exitfunc():
os._exit(0)
Timer(run_for_seconds + 60, exitfunc).start()
for collection in self.database.get_all_collections():
if collection.transform_type:
if not args.quiet:
print("Collection " + str(collection.database_id))
transform = get_transform_instance(collection.transform_type, self.config, self.database,
collection, run_until_timestamp=run_until_timestamp)
transform.process()
# Early return?
if run_until_timestamp and run_until_timestamp < datetime.datetime.utcnow().timestamp():
break
# If the code above took less than 60 seconds the process will stay open, waiting for the Timer to execute.
# So just kill it to make sure.
os._exit(0)
| 42.547619 | 115 | 0.658086 |
e4c71b837b1c3d21a1ca8a396b27c7b4ff759502 | 448 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractEdentranslationsblogWordpressCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | ["BSD-3-Clause"] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractEdentranslationsblogWordpressCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | ["BSD-3-Clause"] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractEdentranslationsblogWordpressCom.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | ["BSD-3-Clause"] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z |
def extractEdentranslationsblogWordpressCom(item):
'''
Parser for 'edentranslationsblog.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if "School Beauty Personal Bodyguard" in item['tags']:
return buildReleaseMessageWithType(item, "School Beauty Personal Bodyguard", vol, chp, frag=frag, postfix=postfix)
	return False
| 34.461538 | 116 | 0.756696 |
f273a85ea70b7279dc0e6084d569b5688347cd34 | 88,142 | py | Python | python/afdko/ttxn.py | kanou-h/afdko | 875042f2f8ffa6da088893f57dd0b5b614cc560b | ["Apache-2.0"] | null | null | null | python/afdko/ttxn.py | kanou-h/afdko | 875042f2f8ffa6da088893f57dd0b5b614cc560b | ["Apache-2.0"] | null | null | null | python/afdko/ttxn.py | kanou-h/afdko | 875042f2f8ffa6da088893f57dd0b5b614cc560b | ["Apache-2.0"] | null | null | null |
#! /usr/bin/env python
# Copyright 2014 Adobe. All Rights Reserved.
"""
How to build a feature-file format.
1) Traverse script->language->feature tree, building up list as we go.
These are all expected to be in alphabetic order in the font.
Add each feature entry as FeatDict[tag][lang_sys].append(index), and as
FeatDict[index].append(lang_sys)
If there is no DFLT script, then step through all entries of FeatDict[index].
Add FeatDict[tag]["DFLT_dflt"].append(index) for all FeatDict[index] where the
number of lang_sys entries is the same as the number of lang_sys values.
Write out the language systems.
Collect and write the class defs.
For each lookup in lookup list: for each class def in the lookup:
add entry to ClassesByNameDict[<glyph list as tuple>].append(lookup index,
subtable_index, class index)
Get list of keys for ClassesByNameDict. Sort. MakeClassNameDict.
For each key of ClassesByNameDict:
make class name from <firstglyph_lastglyph>.
while class name is in ClassesByNameDict:
find glyph in current class that is not in previous class, and
make name firstglyph_diff-glyph_last-glyph
add entry to ClassesByLookup[lookup index, subtable_index, class index] =
class name.
Same for mark classes.
Write out class defs.
Write out the feature list.
Make lookupDict = {}
Get feat tag list. For each tag:
note if DFLT dflt lang sys has any lookups.
for each lang_sys ( by definition, starts with DFLT, then the rest in
alpha order).
If script is DFLT:
don't write script/lang
build list of lookups for DFLT script.
else:
get union of lookup indices in all feat indices
Remove all the default lookups from the lookup list:
if no lookups left, continue
if there was no intersection with default lookups, set exclude
keyword
if the script is the same as the last one, write language only,
else write script and language.
if lookupIndex is in lookupDict:
just write lookup references
else:
write lookup definition with name "feat_script_lang_new_index"
"""
from __future__ import print_function, absolute_import
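# Illustrative sketch (not part of ttxn): the FeatDict traversal described in
# the module docstring above, reduced to plain dicts. The `script_list`
# argument is a hypothetical, pre-digested form of the GSUB/GPOS ScriptList,
# a list of (script_tag, lang_tag, [(feature_tag, feature_index), ...])
# tuples; ttxn itself walks the fontTools table objects instead.
def _example_build_feat_dict(script_list):
    """Sketch only: build the two indexes the docstring describes."""
    feat_dict = {}       # (feature_tag, lang_sys) -> [feature index, ...]
    langs_by_index = {}  # feature index -> [lang_sys, ...]
    for script_tag, lang_tag, features in script_list:
        lang_sys = "%s_%s" % (script_tag, lang_tag)
        for feature_tag, feature_index in features:
            feat_dict.setdefault((feature_tag, lang_sys), []).append(feature_index)
            langs_by_index.setdefault(feature_index, []).append(lang_sys)
    # A later pass can add (feature_tag, "DFLT_dflt") entries for any feature
    # index whose langs_by_index entry covers every lang_sys seen, which is
    # how the docstring handles fonts without a DFLT script.
    return feat_dict, langs_by_index
# Example call (hypothetical data):
#   _example_build_feat_dict([("latn", "dflt", [("kern", 0), ("liga", 1)])])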
__help__ = """
ttxn v1.21.0 Aug 30 2018
Based on the ttx tool, with the same options, except that it is limited to
dumping, and cannot compile. Makes a normalized dump of the font, or of
selected tables.
Adds additional options:
-nh : suppresses hints in the glyph outline dump.
-nv : suppress version info in output: head.fontRevision and name ID 3.
-se : show if lookup is wrapped in an Extension lookup type;
by default, this information is suppressed.
"""
import sys
from fontTools import ttx
from fontTools.ttLib import TTFont, tagToXML, TTLibError
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.misc.loggingTools import Timer
from fontTools.misc.py23 import open, basestring
import copy
import subprocess
import re
import collections
import textwrap
import platform
import getopt
import logging
from afdko.fdkutils import get_temp_file_path
log = logging.getLogger('fontTools.ttx')
curSystem = platform.system()
TX_TOOL = 'tx'
INDENT = " "
ChainINDENT = INDENT + "C "
MAX_LINE_LENGTH = 80
gtextWrapper = textwrap.TextWrapper(width=MAX_LINE_LENGTH,
subsequent_indent=INDENT + INDENT)
NULL_VALUE_RECORD = "<0 0 0 0>"
def getValueRecord(a, b, c, d):
return "<%s %s %s %s>" % (a, b, c, d)
def getAnchorString(anchorTable):
if not anchorTable:
return "<anchor NULL>"
tokenList = ["<anchor", "%d" % anchorTable.XCoordinate,
"%d" % anchorTable.YCoordinate]
if anchorTable.Format == 2:
tokenList.append("%d" % anchorTable.AnchorPoint)
elif anchorTable.Format == 3:
tokenList.append("%d" % anchorTable.XDeviceTable)
tokenList.append("%d" % anchorTable.YDeviceTable)
return " ".join(tokenList) + ">"
class ClassRecord:
def __init__(self, lookupIndex, subtableIndex, classIndex, side="",
anchor=None):
self.lookupIndex = lookupIndex
self.subtableIndex = subtableIndex
self.classIndex = classIndex
self.side = side # None if not a kern pair, else "Left" or "Right"
self.anchor = anchor
def __repr__(self):
return "(lookup %s subtable %s class %s side %s anchor %s)" % (
self.lookupIndex, self.subtableIndex, self.classIndex, self.side,
self.anchor)
def addClassDef(otlConv, classDefs, coverage, side=None, anchor=None):
classDict = {}
glyphDict = copy.deepcopy(otlConv.glyphDict)
for name, classIndex in classDefs.items():
del glyphDict[name]
try:
classDict[classIndex].append(name)
except KeyError:
classDict[classIndex] = [name]
classZeroList = None
if coverage:
if 0 not in classDict:
classZeroList = sorted(glyphDict.keys())
i = len(classZeroList)
while i > 0:
i -= 1
name = classZeroList[i]
if not (name in coverage.glyphs):
classZeroList.remove(name)
if classZeroList:
classDict[0] = classZeroList
else:
classDict[0] = []
for classIndex, nameList in classDict.items():
key = tuple(sorted(nameList))
classRec = ClassRecord(otlConv.curLookupIndex,
otlConv.curSubTableIndex, classIndex, side)
otlConv.classesByNameList[key].append(classRec)
def AddMarkClassDef(otlConv, markCoverage, markArray, tag):
markGlyphList = markCoverage.glyphs
markClass = collections.defaultdict(list)
for m, markGlyph in enumerate(markGlyphList):
markRec = markArray.MarkRecord[m]
markClass[markRec.Class].append(
(getAnchorString(markRec.MarkAnchor), markGlyph))
# Get the mark class names from the global dict
for c, valueList in markClass.items():
anchorDict = collections.defaultdict(list)
for anchor, glyph in valueList:
anchorDict[anchor].append(glyph)
defList = []
for anchor, glyphList in anchorDict.items():
defList.append((tuple(sorted(glyphList)), anchor))
classRec = ClassRecord(otlConv.curLookupIndex,
otlConv.curSubTableIndex, c, tag)
otlConv.markClassesByDefList[tuple(sorted(defList))].append(classRec)
def classPairGPOS(subtable, otlConv=None):
addClassDef(otlConv, subtable.ClassDef1.classDefs, subtable.Coverage,
otlConv.leftSideTag)
addClassDef(otlConv, subtable.ClassDef2.classDefs, None,
otlConv.rightSideTag)
def markClassPOS(subtable, otlConv=None):
AddMarkClassDef(otlConv, subtable.MarkCoverage, subtable.MarkArray,
otlConv.markTag)
def markLigClassPOS(subtable, otlConv=None):
AddMarkClassDef(otlConv, subtable.MarkCoverage, subtable.MarkArray,
otlConv.markTag)
def markMarkClassPOS(subtable, otlConv=None):
AddMarkClassDef(otlConv, subtable.Mark1Coverage, subtable.Mark1Array,
otlConv.mark1Tag)
def classContext(subtable, otlConv):
addClassDef(otlConv, subtable.ClassDef.classDefs, None)
def classChainContext(subtable, otlConv):
for class_def_name in ('BacktrackClassDef', 'InputClassDef',
'LookAheadClassDef'):
class_def = getattr(subtable, class_def_name)
if class_def:
addClassDef(otlConv, class_def.classDefs, None, class_def_name)
def classExt(subtable, otlConv):
handler = otlConv.classHandlers.get(
(subtable.ExtensionLookupType, subtable.ExtSubTable.Format), None)
if handler:
handler(subtable.ExtSubTable, otlConv)
class ContextRecord:
def __init__(self, inputList, sequenceIndex):
self.inputList = inputList
self.sequenceIndex = sequenceIndex
self.glyphsUsed = 0
self.result = ""
def checkGlyphInSequence(glyphName, contextSequence, i):
retVal = 0
try:
input_seq = contextSequence[i]
if isinstance(input_seq, list):
if glyphName in input_seq:
retVal = 1
elif isinstance(input_seq, basestring):
if glyphName == input_seq:
retVal = 1
except IndexError:
pass
return retVal
def ruleSinglePos(subtable, otlConv, context=None):
rules = []
noMatchRules = [] # contains the rules that don't match the context
if context:
indent = ChainINDENT
inputSeqList = context.inputList[context.sequenceIndex:]
else:
indent = ""
inputSeqList = []
if subtable.Format == 1:
valueString = getValRec(subtable.ValueFormat, subtable.Value)
if valueString and valueString != "0" and (
valueString != NULL_VALUE_RECORD):
for tglyph in subtable.Coverage.glyphs:
rule = note = None
if context and (not checkGlyphInSequence(
tglyph, inputSeqList, 0)):
note = " # Note! Not in input sequence"
else:
note = ""
rule = "%spos %s %s;%s" % (indent, tglyph, valueString, note)
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
elif subtable.Format == 2:
for i, subtable_value in enumerate(subtable.Value):
rule = note = None
tglyph = subtable.Coverage.glyphs[i]
valueString = getValRec(subtable.ValueFormat, subtable_value)
if valueString and valueString != "0" and (
valueString != NULL_VALUE_RECORD):
if context and (tglyph not in inputSeqList):
note = " # Note! Not in input sequence"
else:
note = ""
rule = "%spos %s %s;%s" % (indent, tglyph, valueString, note)
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
else:
raise AttributeError("Unknown Single Pos format")
if rules:
return sorted(rules)
else:
return sorted(noMatchRules)
def rulePairPos(subtable, otlConv, context=None):
rules = []
noMatchRules = [] # contains the rules that don't match the context
if context:
indent = ChainINDENT
inputSeqList = context.inputList[context.sequenceIndex:]
else:
indent = ""
inputSeqList = []
if subtable.Format == 1:
firstGlyphList = subtable.Coverage.glyphs
for i, pairSet in enumerate(subtable.PairSet):
g1 = firstGlyphList[i]
for pairValueRec in pairSet.PairValueRecord:
rule = note = None
g2 = pairValueRec.SecondGlyph
note = ""
if context:
note = " # Note! Not in input sequence"
if pairValueRec.Value1:
valueString = getValRec(subtable.ValueFormat1,
pairValueRec.Value1)
if valueString and valueString != "0" and (
valueString != NULL_VALUE_RECORD):
rule = "%spos %s %s %s%s;" % (indent, g1, g2,
valueString, note)
if pairValueRec.Value2:
valueString = getValRec(subtable.ValueFormat1,
pairValueRec.Value2)
if valueString and valueString != "0" and (
valueString != NULL_VALUE_RECORD):
note = ("%s # Warning: non-zero pos value record for "
"second glyph" % note)
rule = "%spos %s %s %s;%s" % (indent, g1, g2,
valueString, note)
if rule:
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
elif subtable.Format == 2:
for i, classRec1 in enumerate(subtable.Class1Record):
# if this class reference exists it has to be in classesByLookup.
g1 = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex, i,
otlConv.leftSideTag]
for j, classRec2 in enumerate(classRec1.Class2Record):
rule = note = None
g2 = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex, j,
otlConv.rightSideTag]
if context:
if checkGlyphInSequence(g1, inputSeqList, 0) and (
checkGlyphInSequence(g2, inputSeqList, 1)):
note = ""
else:
note = " # Note! Not in input sequence"
else:
note = ""
if classRec2.Value1:
valueString = getValRec(subtable.ValueFormat1,
classRec2.Value1)
if valueString and valueString != "0":
rule = "%spos %s %s %s;%s" % (indent, g1, g2,
valueString, note)
if classRec2.Value2:
valueString = getValRec(subtable.ValueFormat1,
classRec2.Value2)
if valueString and valueString != "0":
note = ("%s # Warning: non-zero pos value record for "
"second glyph" % note)
rule = "%spos %s %s %s;%s" % (indent, g1, g2,
valueString, note)
if rule:
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
else:
raise AttributeError("Unknown Pair Pos format")
if rules:
return sorted(rules)
else:
return sorted(noMatchRules)
def ruleCursivePos(subtable, otlConv, context=None):
rules = []
noMatchRules = [] # contains the rules that don't match the context
if context:
indent = ChainINDENT
inputSeqList = context.inputList[context.sequenceIndex:]
else:
indent = ""
inputSeqList = []
cursiveGlyphList = subtable.Coverage.glyphs
for i, eRec in enumerate(subtable.EntryExitRecord):
rule = note = None
tglyph = cursiveGlyphList[i]
if context and (not checkGlyphInSequence(tglyph, inputSeqList, 0)):
note = " # Note! Not in input sequence"
else:
note = ""
anchor1 = getAnchorString(eRec.EntryAnchor)
anchor2 = getAnchorString(eRec.ExitAnchor)
rule = "%spos cursive %s %s %s;%s" % (indent, tglyph, anchor1, anchor2,
note)
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
if rules:
return sorted(rules)
else:
return sorted(noMatchRules)
def ruleMarkBasePos(subtable, otlConv, context=None):
rules = []
noMatchRules = [] # contains the rules that don't match the context
if context:
indent = ChainINDENT
inputSeqList = context.inputList[context.sequenceIndex:]
else:
indent = ""
inputSeqList = []
subsequentIndent = indent + INDENT
markGlyphList = subtable.MarkCoverage.glyphs
baseGlyphList = subtable.BaseCoverage.glyphs
markClass = collections.defaultdict(list)
classCount = len(set(v.Class for v in subtable.MarkArray.MarkRecord))
for m, markGlyph in enumerate(markGlyphList):
markRec = subtable.MarkArray.MarkRecord[m]
markClass[markRec.Class].append((getAnchorString(markRec.MarkAnchor),
markGlyph))
# Get the mark class names from the global dict
markClassNameList = []
for c in markClass.keys():
markClassName = otlConv.markClassesByLookup[otlConv.curLookupIndex,
otlConv.curSubTableIndex,
c, otlConv.markTag]
markClassNameList.append(markClassName)
# build a dict of [common set of anchors] -> glyph list.
baseAnchorSets = collections.defaultdict(list)
for b, baseGlyph in enumerate(baseGlyphList):
baseRec = subtable.BaseArray.BaseRecord[b]
anchorList = []
for c in range(classCount):
anchorList.append(getAnchorString(baseRec.BaseAnchor[c]))
anchorKey = tuple(anchorList)
baseAnchorSets[anchorKey].append(baseGlyph)
for anchorKey, glyphList in baseAnchorSets.items():
note = ""
glyphList.sort()
if context:
if isinstance(inputSeqList[0], list):
inputList = inputSeqList[0]
else:
inputList = [inputSeqList[0]]
overlapList = set(glyphList).intersection(inputList)
if not overlapList:
note = " # Note! Not in input sequence"
glyphTxt = " ".join(glyphList)
else:
glyphTxt = " ".join(sorted(overlapList))
else:
glyphTxt = " ".join(glyphList)
pos_ind = '\n' + INDENT * 4
rule = ["pos base [%s]" % pos_ind.join(gtextWrapper.wrap(glyphTxt))]
for cl in range(classCount):
rule.append(" %s mark %s" % (anchorKey[cl], markClassNameList[cl]))
if (cl + 1) < classCount: # if it is not the last one
rule.append("\n%s" % subsequentIndent)
rule.append(";")
if note:
rule.append(note)
rule = "".join(rule)
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
if rules:
return sorted(rules)
else:
return sorted(noMatchRules)
def ruleMarkLigPos(subtable, otlConv, context=None):
rules = []
noMatchRules = [] # contains the rules that don't match the context
markGlyphList = subtable.MarkCoverage.glyphs
ligGlyphList = subtable.LigatureCoverage.glyphs
markClass = collections.defaultdict(list)
classCount = len(set(v.Class for v in subtable.MarkArray.MarkRecord))
for m, markGlyph in enumerate(markGlyphList):
markRec = subtable.MarkArray.MarkRecord[m]
markClass[markRec.Class].append((getAnchorString(markRec.MarkAnchor),
markGlyph))
# Get the mark class names from the global dict
markClassNameList = []
for c in markClass.keys():
markClassName = otlConv.markClassesByLookup[otlConv.curLookupIndex,
otlConv.curSubTableIndex,
c, otlConv.markTag]
markClassNameList.append(markClassName)
if context:
indent = ChainINDENT
inputSeqList = context.inputList[context.sequenceIndex:]
else:
indent = ""
inputSeqList = []
subsequentIndent = indent + INDENT
for l, ligAttach in enumerate(subtable.LigatureArray.LigatureAttach):
ligGlyph = ligGlyphList[l]
tokenList = ["pos ligature ", ligGlyph]
if context and (not checkGlyphInSequence(ligGlyph, inputSeqList, 0)):
note = " # Note! Not in input sequence"
else:
note = ""
for cmpIndex, componentRec in enumerate(ligAttach.ComponentRecord):
if cmpIndex > 0:
tokenList.append("\n%sligComponent" % (subsequentIndent * 5))
for cl in range(classCount):
tokenList.append(" %s mark %s" % (
getAnchorString(componentRec.LigatureAnchor[cl]),
markClassNameList[cl]))
if (cl + 1) < classCount:
tokenList.append("\n%s" % subsequentIndent)
tokenList.append(";")
if note:
tokenList.append(note)
rule = "".join(tokenList)
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
if rules:
return sorted(rules)
else:
return sorted(noMatchRules)
def ruleMarkMarkPos(subtable, otlConv, context=None):
rules = []
noMatchRules = [] # contains the rules that don't match the context
if context:
indent = ChainINDENT
inputSeqList = context.inputList[context.sequenceIndex:]
else:
indent = ""
inputSeqList = []
subsequentIndent = indent + INDENT
markGlyphList = subtable.Mark1Coverage.glyphs
mark2GlyphList = subtable.Mark2Coverage.glyphs
markClass = collections.defaultdict(list)
classCount = len(set(v.Class for v in subtable.Mark1Array.MarkRecord))
for m, markGlyph in enumerate(markGlyphList):
markRec = subtable.Mark1Array.MarkRecord[m]
markClass[markRec.Class].append((getAnchorString(markRec.MarkAnchor),
markGlyph))
# Get the mark class names from the global dict
markClassNameList = []
for c in markClass.keys():
markClassName = otlConv.markClassesByLookup[otlConv.curLookupIndex,
otlConv.curSubTableIndex,
c, otlConv.mark1Tag]
markClassNameList.append(markClassName)
mark2AnchorSets = collections.defaultdict(list)
for b, mark2Glyph in enumerate(mark2GlyphList):
mark2Rec = subtable.Mark2Array.Mark2Record[b]
anchorList = []
for c in range(classCount):
anchorList.append(getAnchorString(mark2Rec.Mark2Anchor[c]))
anchorKey = tuple(anchorList)
mark2AnchorSets[anchorKey].append(mark2Glyph)
for anchorKey, glyphList in mark2AnchorSets.items():
note = None
# This sort will lead the rules to be sorted by base glyph names.
glyphList.sort()
if context:
if isinstance(inputSeqList[0], list):
inputList = inputSeqList[0]
else:
inputList = [inputSeqList[0]]
overlapList = set(glyphList).intersection(inputList)
if not overlapList:
note = " # Note! Not in input sequence"
glyphTxt = " ".join(glyphList)
else:
note = ""
glyphTxt = " ".join(overlapList)
else:
note = ""
glyphTxt = " ".join(glyphList)
rule = ["%spos mark [%s]" % (indent, glyphTxt)]
for cl in range(classCount):
rule.append(" %s mark %s" % (anchorKey[cl], markClassNameList[cl]))
if (cl + 1) < classCount: # if it is not the last one
rule.append("\n%s" % subsequentIndent)
rule.append(";")
if note:
rule.append(note)
rule = "".join(rule)
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
if rules:
return sorted(rules)
else:
return sorted(noMatchRules)
def ruleContextPOS(subtable, otlConv, context=None):
chainRules = []
# pLookupList = otlConv.table.LookupList.Lookup
# ruleContextPOS 7-3
# XXX support is incomplete
if subtable.Format == 3:
log.warn('Support for GPOS LookupType 7, Format 3 is incomplete')
inputList = []
inputList2 = []
for input_coverage in subtable.Coverage:
glyphList = sorted(input_coverage.glyphs)
for i in reversed(range(1, len(glyphList))):
if glyphList[i - 1] == glyphList[i]:
del glyphList[i]
inputList2.append(glyphList)
inputList.append("[" + " ".join(glyphList) + "]")
inputTxt = "' ".join(inputList) + "'"
rule = "pos %s;" % inputTxt
posRules = []
# for subRec in subtable.PosLookupRecord:
# lookup = pLookupList[subRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(inputList2, subRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# posRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, posRules])
# ruleContextPOS 7-2
# XXX support is incomplete
elif subtable.Format == 2:
log.warn('Support for GPOS LookupType 7, Format 2 is incomplete')
for i, ctxClassSet in enumerate(subtable.PosClassSet):
if not ctxClassSet:
continue
for ctxClassRule in ctxClassSet.PosClassRule:
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex, i, None]
inputList = [className]
inputList2 = [otlConv.classesByClassName[className]]
for classIndex in ctxClassRule.Class:
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex,
classIndex, None]
inputList.append(className)
inputList2.append(otlConv.classesByClassName[className])
inputTxt = "' ".join(inputList) + "'"
rule = "pos %s;" % inputTxt
posRules = []
# for subsRec in ctxClassRule.PosLookupRecord:
# lookup = pLookupList[subsRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subsRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(
# inputList, subsRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(
# sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# posRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, posRules])
# ruleContextPOS 7-1
# XXX support is incomplete
elif subtable.Format == 1:
log.warn('Support for GPOS LookupType 7, Format 1 is incomplete')
firstGlyphList = subtable.Coverage.glyphs
# for each glyph in the coverage table
for ri, subRuleSet in enumerate(subtable.PosRuleSet):
firstGlyph = firstGlyphList[ri]
for ctxRuleRec in subRuleSet.PosRule:
inputList = [firstGlyph] + ctxRuleRec.Input
inputTxt = "' ".join(inputList) + "'"
rule = "pos %s" % inputTxt
posRules = []
# for subsRec in ctxRuleRec.PosLookupRecord:
# lookup = pLookupList[subsRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subsRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(inputList,
# subsRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(
# sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# posRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, posRules])
else:
raise AttributeError("Unknown Context Pos format %s" %
subtable.Format)
rules = []
for chainRule, posRules in sorted(chainRules):
rules.append(chainRule)
rules.extend(posRules)
return rules
def ruleChainContextPOS(subtable, otlConv, context=None):
chainRules = []
# pLookupList = otlConv.table.LookupList.Lookup
# ruleChainContextPOS 8-3
# XXX support is incomplete
if subtable.Format == 3:
log.warn('Support for GPOS LookupType 8, Format 3 is incomplete')
backtrackList = []
for backtrack_coverage in subtable.BacktrackCoverage:
glyphList = sorted(backtrack_coverage.glyphs)
for i in reversed(range(1, len(glyphList))):
if glyphList[i - 1] == glyphList[i]:
del glyphList[i]
backtrackList.append("[" + " ".join(glyphList) + "]")
backtrackList.reverse()
backTxt = " ".join(backtrackList)
inputList = []
inputList2 = []
for input_coverage in subtable.InputCoverage:
glyphList = sorted(input_coverage.glyphs)
for i in reversed(range(1, len(glyphList))):
if glyphList[i - 1] == glyphList[i]:
del glyphList[i]
inputList2.append(glyphList)
inputList.append("[" + " ".join(glyphList) + "]")
inputTxt = "' ".join(inputList) + "'"
lookAheadList = []
for lookahead_coverage in subtable.LookAheadCoverage:
glyphList = sorted(lookahead_coverage.glyphs)
for i in reversed(range(1, len(glyphList))):
if glyphList[i - 1] == glyphList[i]:
del glyphList[i]
lookAheadList.append("[" + " ".join(glyphList) + "]")
lookAheadTxt = " ".join(lookAheadList)
rule = "pos %s %s %s;" % (backTxt, inputTxt, lookAheadTxt)
posRules = []
# for subRec in subtable.PosLookupRecord:
# lookup = pLookupList[subRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(inputList2, subRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# posRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, posRules])
# ruleChainContextPOS 8-2
# XXX support is incomplete
elif subtable.Format == 2:
log.warn('Support for GPOS LookupType 8, Format 2 is incomplete')
for i, ctxClassSet in enumerate(subtable.ChainSubClassSet):
if not ctxClassSet:
continue
for ctxClassRule in ctxClassSet.ChainPosRuleSet:
backTrackList = []
for classIndex in ctxClassRule.Backtrack:
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex,
classIndex, otlConv.backtrackTag]
backTrackList.append(className)
backTxt = " ".join(backTrackList)
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex, i,
otlConv.InputTag]
inputList = [className]
inputList2 = [otlConv.classesByClassName[className]]
for classIndex in ctxClassRule.Input:
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex,
classIndex, otlConv.InputTag]
inputList.append(className)
inputList2.append(otlConv.classesByClassName[className])
inputTxt = "' ".join(inputList) + "'"
lookAheadList = []
for classIndex in ctxClassRule.LookAhead:
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex,
classIndex, otlConv.lookAheadTag]
lookAheadList.append(className)
lookTxt = " ".join(lookAheadList)
rule = "sub %s %s %s;" % (backTxt, inputTxt, lookTxt)
posRules = []
# for subsRec in ctxClassRule.SubstLookupRecord:
# lookup = pLookupList[subsRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subsRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(inputList,
# subsRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(
# sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# posRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, posRules])
# ruleChainContextPOS 8-1
# XXX support is incomplete
elif subtable.Format == 1:
log.warn('Support for GPOS LookupType 8, Format 1 is incomplete')
firstGlyphList = subtable.Coverage.glyphs
# for each glyph in the coverage table
for ri, subRuleSet in enumerate(subtable.ChainPosRuleSet):
firstGlyph = firstGlyphList[ri]
for ctxRuleRec in subRuleSet.ChainPosRule:
backList = ctxRuleRec.Backtrack
backList.reverse()
backTxt = " ".join(backList)
inputList = [firstGlyph] + ctxRuleRec.Input
inputTxt = "' ".join(inputList) + "'"
lookAheadTxt = " ".join(ctxRuleRec.LookAhead)
rule = "sub %s %s %s" % (backTxt, inputTxt, lookAheadTxt)
posRules = []
# for subsRec in ctxRuleRec.SubstLookupRecord:
# lookup = pLookupList[subsRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subsRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(inputList,
# subsRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(
# sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# posRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, posRules])
else:
raise AttributeError("Unknown Chain Context Pos format %s" % (
subtable.Format))
rules = []
for chainRule, posRules in sorted(chainRules):
rules.append(chainRule)
rules.extend(posRules)
return rules
def ruleExt(subtable, otlConv, context=None):
handler = otlConv.ruleHandlers[subtable.ExtensionLookupType]
rules = handler(subtable.ExtSubTable, otlConv, context)
return rules
def ruleSingleSub(subtable, otlConv, context=None):
rules = []
noMatchRules = [] # contains the rules that don't match the context
if context:
indent = ChainINDENT
inputSeqList = context.inputList[context.sequenceIndex:]
else:
indent = ""
inputSeqList = []
for g1, g2 in subtable.mapping.items():
rule = note = None
if context and (not checkGlyphInSequence(g1, inputSeqList, 0)):
note = " # Note! Not in input sequence"
else:
note = ""
rule = "%ssub %s by %s;%s" % (indent, g1, g2, note)
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
if rules:
return sorted(rules)
else:
return sorted(noMatchRules)
def ruleMultipleSub(subtable, otlConv, context=None):
rules = []
noMatchRules = [] # contains the rules that don't match the context
if context:
indent = ChainINDENT
inputSeqList = context.inputList[context.sequenceIndex:]
else:
indent = ""
inputSeqList = []
for g1, substGlyphList in subtable.mapping.items():
rule = note = None
if context and (not checkGlyphInSequence(g1, inputSeqList, 0)):
note = " # Note! Not in input sequence"
else:
note = ""
subTxt = " ".join(substGlyphList)
rule = "%ssub %s by [%s];%s" % (indent, g1, subTxt, note)
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
if rules:
return sorted(rules)
else:
return sorted(noMatchRules)
def ruleAltSub(subtable, otlConv, context=None):
rules = []
noMatchRules = [] # contains the rules that don't match the context
if context:
indent = ChainINDENT
inputSeqList = context.inputList[context.sequenceIndex:]
else:
indent = ""
inputSeqList = []
for g1, alts in subtable.alternates.items():
rule = note = None
if context and (not checkGlyphInSequence(g1, inputSeqList, 0)):
note = " # Note! Not in input sequence"
else:
note = ""
altText = " ".join(sorted(alts))
rule = "%ssub %s from [%s];%s" % (indent, g1, altText, note)
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
if rules:
return sorted(rules)
else:
return sorted(noMatchRules)
def ruleLigatureSub(subtable, otlConv, context=None):
rules = []
noMatchRules = [] # contains the rules that don't match the context
if context:
indent = ChainINDENT
inputSeqList = context.inputList[context.sequenceIndex:]
else:
indent = ""
inputSeqList = []
for item in subtable.ligatures.items():
ligs = item[1]
firstGlyph = item[0]
for lig in ligs:
rule = note = None
tokenList = [firstGlyph]
missing = []
foundAll = 1
if context:
try:
if not checkGlyphInSequence(firstGlyph, inputSeqList, 0):
raise ValueError
# make sure everything else is also present with
# the same index.
for i, gname in enumerate(lig.Component):
if not checkGlyphInSequence(gname, inputSeqList,
i + 1):
foundAll = 0
missing.append(gname)
if not foundAll:
note = (" # Note! lig components %s are not in input "
"sequence with same index."
% " ".join(missing))
else:
note = ""
except ValueError:
note = (" # Note! first glyph %s is not in input "
"sequence." % firstGlyph)
else:
note = ""
tokenList.extend(lig.Component)
glyphTxt = " ".join(tokenList)
rule = "%ssub %s by %s;%s" % (indent, glyphTxt, lig.LigGlyph, note)
if context and note:
noMatchRules.append(rule)
else:
rules.append(rule)
if rules:
return sorted(rules)
else:
return sorted(noMatchRules)
def ruleContextSUB(subtable, otlConv, context=None):
chainRules = []
# pLookupList = otlConv.table.LookupList.Lookup
# ruleContextSUB 5-3
# XXX support is incomplete
if subtable.Format == 3:
log.warn('Support for GSUB LookupType 5, Format 3 is incomplete')
inputList = []
inputList2 = []
for input_coverage in subtable.Coverage:
glyphList = sorted(input_coverage.glyphs)
for i in reversed(range(1, len(glyphList))):
if glyphList[i - 1] == glyphList[i]:
del glyphList[i]
inputList2.append(glyphList)
inputList.append("[" + " ".join(glyphList) + "]'")
inputTxt = "' ".join(inputList) + "'"
rule = "sub %s;" % inputTxt
subRules = []
# for subRec in subtable.SubstLookupRecord:
# lookup = pLookupList[subRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(inputList2, subRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# subRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, subRules])
# ruleContextSUB 5-2
# XXX support is incomplete
elif subtable.Format == 2:
log.warn('Support for GSUB LookupType 5, Format 2 is incomplete')
for i, ctxClassSet in enumerate(subtable.SubClassSet):
if not ctxClassSet:
continue
for ctxClassRule in ctxClassSet.SubClassRule:
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex, i, None]
inputList = [className]
inputList2 = [otlConv.classesByClassName[className]]
for classIndex in ctxClassRule.Class:
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex,
classIndex, None]
inputList.append(className)
inputList2.append(otlConv.classesByClassName[className])
inputTxt = "' ".join(inputList) + "'"
rule = "sub %s;" % inputTxt
subRules = []
# for subsRec in ctxClassRule.SubstLookupRecord:
# lookup = pLookupList[subsRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subsRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(
# inputList, subsRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(
# sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# subRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, subRules])
# ruleContextSUB 5-1
# XXX support is incomplete
elif subtable.Format == 1:
log.warn('Support for GSUB LookupType 5, Format 1 is incomplete')
firstGlyphList = subtable.Coverage.glyphs
# for each glyph in the coverage table
for ri, subRuleSet in enumerate(subtable.SubRuleSet):
firstGlyph = firstGlyphList[ri]
for ctxRuleRec in subRuleSet.SubRule:
inputList = [firstGlyph] + ctxRuleRec.Input
inputTxt = "' ".join(inputList) + "'"
rule = "sub %s" % inputTxt
subRules = []
# for subsRec in ctxRuleRec.SubstLookupRecord:
# lookup = pLookupList[subsRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subsRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(inputList,
# subsRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(
# sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# subRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, subRules])
else:
raise AttributeError("Unknown Context Sub format %s" % (
subtable.Format))
rules = []
for chainRule, subRules in sorted(chainRules):
rules.append(chainRule)
rules.extend(subRules)
return rules
def wrap_statement(token_str):
"""
Wraps a long string of space-separated tokens
or a list of tokens.
"""
if isinstance(token_str, list):
token_str = ' '.join(token_str)
wrap_ind = '\n' + INDENT * 4
return wrap_ind.join(gtextWrapper.wrap(token_str))
def ruleChainContextSUB(subtable, otlConv, context=None):
chainRules = []
# pLookupList = otlConv.table.LookupList.Lookup
# ruleChainContextSUB 6-3
# XXX support is incomplete
if subtable.Format == 3:
log.warn('Support for GSUB LookupType 6, Format 3 is incomplete')
backtrackList = []
for backtrack_coverage in subtable.BacktrackCoverage:
glyphList = sorted(backtrack_coverage.glyphs)
for i in reversed(range(1, len(glyphList))):
if glyphList[i - 1] == glyphList[i]:
del glyphList[i]
backtrackList.append("[" + " ".join(glyphList) + "]")
backtrackList.reverse()
backTxt = " ".join(backtrackList)
inputList = []
inputList2 = []
for input_coverage in subtable.InputCoverage:
glyphList = sorted(input_coverage.glyphs)
for i in reversed(range(1, len(glyphList))):
if glyphList[i - 1] == glyphList[i]:
del glyphList[i]
inputList2.append(glyphList)
inputList.append("[" + " ".join(glyphList) + "]'")
inputTxt = " ".join(inputList)
lookAheadList = []
for lookahead_coverage in subtable.LookAheadCoverage:
glyphList = sorted(lookahead_coverage.glyphs)
for i in reversed(range(1, len(glyphList))):
if glyphList[i - 1] == glyphList[i]:
del glyphList[i]
lookAheadList.append("[" + " ".join(glyphList) + "]")
lookAheadTxt = " ".join(lookAheadList)
rule = wrap_statement(
"sub %s %s %s;" % (backTxt, inputTxt, lookAheadTxt))
subRules = []
# for subsRec in subtable.SubstLookupRecord:
# lookup = pLookupList[subsRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subsRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(inputList2, subsRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# subRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, subRules])
# ruleChainContextSUB 6-2
# XXX support is incomplete
elif subtable.Format == 2:
log.warn('Support for GSUB LookupType 6, Format 2 is incomplete')
for i, ctxClassSet in enumerate(subtable.ChainSubClassSet):
if not ctxClassSet:
continue
for ctxClassRule in ctxClassSet.ChainSubClassRule:
backTrackList = []
for classIndex in ctxClassRule.Backtrack:
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex,
classIndex, otlConv.backtrackTag]
backTrackList.append(className)
backTxt = " ".join(backTrackList)
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex, i,
otlConv.InputTag]
inputList = [className]
inputList2 = [otlConv.classesByClassName[className]]
for classIndex in ctxClassRule.Input:
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex,
classIndex, otlConv.InputTag]
inputList.append(className)
inputList2.append(otlConv.classesByClassName[className])
inputTxt = " ".join(inputList)
lookAheadList = []
for classIndex in ctxClassRule.LookAhead:
className = otlConv.classesByLookup[
otlConv.curLookupIndex, otlConv.curSubTableIndex,
classIndex, otlConv.lookAheadTag]
lookAheadList.append(className)
lookTxt = " ".join(lookAheadList)
rule = "sub %s %s' %s;" % (backTxt, inputTxt, lookTxt)
subRules = []
# for subsRec in ctxClassRule.SubstLookupRecord:
# lookup = pLookupList[subsRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subsRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(
# inputList, subsRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(
# sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# subRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, subRules])
# ruleChainContextSUB 6-1
# XXX support is incomplete
elif subtable.Format == 1:
log.warn('Support for GSUB LookupType 6, Format 1 is incomplete')
firstGlyphList = subtable.Coverage.glyphs
# for each glyph in the coverage table
for ri, subRuleSet in enumerate(subtable.ChainSubRuleSet):
firstGlyph = firstGlyphList[ri]
for ctxRuleRec in subRuleSet.ChainSubRule:
backList = ctxRuleRec.Backtrack
backList.reverse()
backTxt = " ".join(backList)
inputList = [firstGlyph] + ctxRuleRec.Input
inputTxt = " ".join(inputList)
lookAheadTxt = " ".join(ctxRuleRec.LookAhead)
rule = "sub %s %s' %s by " % (backTxt, inputTxt, lookAheadTxt)
subRules = []
# for subsRec in ctxRuleRec.SubstLookupRecord:
# lookup = pLookupList[subsRec.LookupListIndex]
# lookupType = lookup.LookupType
# curLI = otlConv.curLookupIndex
# otlConv.curLookupIndex = subsRec.LookupListIndex
# handler = otlConv.ruleHandlers[lookupType]
# contextRec = ContextRecord(inputList,
# subsRec.SequenceIndex)
# for si, sub_table in enumerate(lookup.SubTable):
# curSI = otlConv.curSubTableIndex
# otlConv.curSubTableIndex = si
# subtablerules = handler(
# sub_table, otlConv, contextRec)
# otlConv.curSubTableIndex = curSI
# subRules.extend(subtablerules)
# otlConv.curLookupIndex = curLI
chainRules.append([rule, subRules])
else:
raise AttributeError("Unknown Chain Context Sub format %s" % (
subtable.Format))
rules = []
for chainRule, subRules in sorted(chainRules):
rules.append(chainRule)
rules.extend(subRules)
return rules
def ruleReverseChainSub(subtable, otlConv, context=None):
rules = []
if subtable.Format == 1:
backtrackList = []
for backtrack_coverage in subtable.BacktrackCoverage:
glyphList = sorted(backtrack_coverage.glyphs)
for i in reversed(range(1, len(glyphList))):
if glyphList[i - 1] == glyphList[i]:
del glyphList[i]
backtrackTxt = "[ %s ] " % " ".join(glyphList)
backtrackList.append(backtrackTxt)
backtrackList.reverse()
backTxt = " ".join(backtrackList)
glyphList = subtable.Coverage.glyphs
# Since the substitute list is in the same order as the
# coverage table, I need to sort the substitution array
# in the same order as the input list.
glyphList = sorted([[glyph, i] for i, glyph in enumerate(glyphList)])
substituteList = []
if subtable.Substitute:
for entry in glyphList:
substituteList.append(subtable.Substitute[entry[1]])
for i in reversed(range(1, len(glyphList))):
if glyphList[i - 1] == glyphList[i]:
del glyphList[i]
del substituteList[i]
glyphList = [entry[0] for entry in glyphList]
inputTxt = "[ %s ]" % " ".join(glyphList)
lookAheadList = []
for lookahead_coverage in subtable.LookAheadCoverage:
glyphList = sorted(lookahead_coverage.glyphs)
for i in reversed(range(1, len(glyphList))):
if glyphList[i - 1] == glyphList[i]:
del glyphList[i]
lookAheadTxt = " [ %s ]" % " ".join(glyphList)
lookAheadList.append(lookAheadTxt)
lookAheadTxt = " ".join(lookAheadList)
if subtable.Substitute:
replText = " by [ %s ]" % (" ".join(substituteList))
else:
replText = ""
rule = "sub %s%s'%s%s;" % (backTxt, inputTxt, lookAheadTxt, replText)
rules.append(rule)
return rules
def getValRec(valueFormat, valueRec):
if valueFormat == 4:
return "%d" % valueRec.XAdvance
else:
xPos = yPos = xAdv = yAdv = 0
if valueFormat & 1:
xPos = valueRec.XPlacement
if valueFormat & 2:
yPos = valueRec.YPlacement
if valueFormat & 4:
xAdv = valueRec.XAdvance
if valueFormat & 8:
yAdv = valueRec.YAdvance
if xPos == yPos == xAdv == yAdv == 0:
return "0"
return getValueRecord(xPos, yPos, xAdv, yAdv)
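# --- Illustrative note (not part of the original file) ---
# valueFormat is a bit mask: 1 = XPlacement, 2 = YPlacement, 4 = XAdvance,
# 8 = YAdvance. For example, a valueFormat of 5 reads XPlacement and XAdvance
# and emits a four-value record via getValueRecord(xPos, 0, xAdv, 0), while a
# valueFormat of exactly 4 is written as the bare XAdvance number.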
gLookupFlagMap = {
1: "RightToLeft",
2: "IgnoreBaseGlyphs",
4: "IgnoreLigatures",
8: "IgnoreMarks",
0xFF00: "MarkAttachmentTypedef",
}
gLookupFlagKeys = sorted(gLookupFlagMap.keys())
def getLookupFlagTag(lookupFlag):
retList = []
for lookupVal in gLookupFlagKeys:
if lookupVal & lookupFlag:
retList.append(gLookupFlagMap[lookupVal])
if lookupVal == 0xFF00:
classIndex = (lookupVal & lookupFlag) >> 8
retList.append("%d" % classIndex)
return " ".join(retList)
DFLT_LANGSYS_KEYS = (("DFLT", "dflt"), ("dflt", "dflt"))
class OTLConverter(object):
leftSideTag = "Left"
rightSideTag = "Right"
backtrackTag = "BacktrackClassDef"
InputTag = "InputClassDef"
lookAheadTag = "LookAheadClassDef"
markTag = "MarkClass"
mark1Tag = "MarkMarkClass1"
mark2Tag = "MarkMarkClass2"
def __init__(self, writer, ttFont, table_tag):
self.writer = writer
self.table = ttFont[table_tag].table
self.classesByNameList = collections.defaultdict(list)
self.classesByLookup = {}
self.classesByClassName = {}
self.lookupIndex = -1
self.seenLookup = {}
self.glyphDict = {}
self.markClassesByDefList = collections.defaultdict(list)
self.markClassesByLookup = collections.defaultdict(list)
self.markClassesByClassName = {}
self.curLookupIndex = -1
self.curSubTableIndex = -1
self.curLookupName = ""
self.showExtension = ttFont.showExtensionFlag
for name in ttFont.getGlyphOrder():
self.glyphDict[name] = 0
if table_tag == "GPOS":
self.ExtensionIndex = 9
self.classHandlers = {
(2, 2): classPairGPOS,
(4, 1): markClassPOS,
(5, 1): markLigClassPOS,
(6, 1): markMarkClassPOS,
(7, 2): classContext,
(8, 2): classChainContext,
(9, 1): classExt,
}
self.ruleHandlers = {
1: ruleSinglePos,
2: rulePairPos,
3: ruleCursivePos,
4: ruleMarkBasePos,
5: ruleMarkLigPos,
6: ruleMarkMarkPos,
7: ruleContextPOS,
8: ruleChainContextPOS,
9: ruleExt,
}
elif table_tag == "GSUB":
self.ExtensionIndex = 7
self.classHandlers = {
(5, 2): classContext,
(6, 2): classChainContext,
(7, 1): classExt,
}
self.ruleHandlers = {
1: ruleSingleSub,
2: ruleMultipleSub,
3: ruleAltSub,
4: ruleLigatureSub,
5: ruleContextSUB,
6: ruleChainContextSUB,
7: ruleExt,
8: ruleReverseChainSub,
}
else:
raise KeyError("OTLConverter can only be called for GPOS and "
"GSUB tables")
def otlFeatureFormat(self):
# get and write language systems
lsList, featDictByLangSys, featDictByIndex = self.buildLangSys()
self.writeLangSys(lsList)
self.writer.newline()
# get and write class defs.
self.buildClasses()
self.writeClasses()
self.writeMarkClasses()
# do feature defs
self.doFeatures(featDictByLangSys)
@staticmethod
def addLangSysEntry(pfeatList, scriptTag, langTag, langSys,
featDictByLangSys, featDictByIndex):
for featIndex in langSys.FeatureIndex:
try:
featRecord = pfeatList[featIndex]
except IndexError:
log.error("FeatList does not contain index %s from current "
"langsys (%s, %s).", featIndex, scriptTag, langTag)
log.error("FeaturListLen %s, langsys FeatIndex %s.",
len(pfeatList), langSys.FeatureIndex)
continue
featTag = featRecord.FeatureTag
lang_sys = (scriptTag, langTag)
try:
featDictByIndex[featIndex].append(lang_sys)
except KeyError:
featDictByIndex[featIndex] = [lang_sys]
langsysDict = featDictByLangSys.get(featTag, {})
try:
langsysDict[lang_sys].append(featIndex)
except KeyError:
langsysDict[lang_sys] = [featIndex]
featDictByLangSys[featTag] = langsysDict
def buildLangSys(self):
pfeatList = self.table.FeatureList.FeatureRecord
featDictByLangSys = {}
featDictByIndex = {}
haveDFLT = False
lsList = []
for scriptRecord in self.table.ScriptList.ScriptRecord:
scriptTag = scriptRecord.ScriptTag
if scriptTag in ("DFLT", "dflt"):
haveDFLT = True
if scriptRecord.Script.DefaultLangSys:
langTag = "dflt"
lsList.insert(0, (
scriptTag, langTag,
scriptRecord.Script.DefaultLangSys.ReqFeatureIndex))
self.addLangSysEntry(pfeatList, scriptTag, langTag,
scriptRecord.Script.DefaultLangSys,
featDictByLangSys, featDictByIndex)
for langSysRecord in scriptRecord.Script.LangSysRecord:
langTag = langSysRecord.LangSysTag
lsList.append((scriptTag, langTag,
langSysRecord.LangSys.ReqFeatureIndex))
self.addLangSysEntry(pfeatList, scriptTag, langTag,
langSysRecord.LangSys, featDictByLangSys,
featDictByIndex)
if not haveDFLT:
# Synthesize a DFLT dflt entry.
addedDFLT = False
scriptCount = len(self.table.ScriptList.ScriptRecord)
for langsysList in featDictByIndex.values():
if len(langsysList) == scriptCount:
langsysList.insert(0, ("DFLT", "dflt", 0xFFFF))
addedDFLT = True
if addedDFLT:
lsList.insert(0, ("DFLT", "dflt", 0xFFFF))
return lsList, featDictByLangSys, featDictByIndex
def writeLangSys(self, lsList):
# lsList is alphabetic, because of font order, except that
# I moved (DFLT, dflt) script to the top.
for lang_sys in lsList:
if lang_sys[2] != 0xFFFF:
rfTxt = " Required Feature Index %s" % (lang_sys[2])
else:
rfTxt = ""
self.writer.write("languagesystem %s %s%s;" % (
lang_sys[0], lang_sys[1], rfTxt))
self.writer.newline()
def buildClasses(self):
for lookupIndex, lookup in enumerate(self.table.LookupList.Lookup):
self.curLookupIndex = lookupIndex
for subtableIndex, subtable in enumerate(lookup.SubTable):
self.curSubTableIndex = subtableIndex
handler = self.classHandlers.get(
(lookup.LookupType, subtable.Format))
if handler:
handler(subtable, self)
for nameList in sorted(self.classesByNameList.keys()):
classRecList = self.classesByNameList[nameList]
lenList = len(nameList)
if lenList == 0:
className = "@empty"
elif lenList == 1:
className = "[%s]" % (nameList[0])
elif lenList == 2:
className = "@%s_%s" % (nameList[0], nameList[1])
else:
className = "@%s_%d_%s" % (nameList[0], len(nameList),
nameList[-1])
i = 1
while (i < lenList) and className in self.classesByClassName:
className = "@%s_%s_%d_%s" % (nameList[0], nameList[i],
len(nameList), nameList[-1])
self.classesByClassName[className] = nameList
for classRec in classRecList:
key = (classRec.lookupIndex, classRec.subtableIndex,
classRec.classIndex, classRec.side)
self.classesByLookup[key] = className
for defList in sorted(self.markClassesByDefList.keys()):
classRecList = self.markClassesByDefList[defList]
defNameList = []
for nameList, anchor in defList:
defNameList.extend(list(nameList))
lenList = len(defNameList)
if lenList == 0:
className = "@empty_mark"
elif lenList == 1:
className = "%s_mark" % (defNameList[0])
elif lenList == 2:
className = "@%s_%s_mark" % (defNameList[0], defNameList[1])
else:
className = "@%s_%d_%s_mark" % (
defNameList[0], len(defNameList), defNameList[-1])
i = 1
while (i < lenList) and className in self.classesByClassName:
className = "@%s_%s_%d_%s_mark" % (
defNameList[0], defNameList[i], len(defNameList),
defNameList[-1])
self.markClassesByClassName[className] = defList
for classRec in classRecList:
key = (classRec.lookupIndex, classRec.subtableIndex,
classRec.classIndex, classRec.side)
self.markClassesByLookup[key] = className
def writeClasses(self):
classNames = list(self.classesByClassName.keys())
if classNames:
self.writer.newline()
self.writer.write(
"# Class definitions *********************************")
self.writer.newline()
for className in sorted(classNames):
if className[0] == "[":
# we don't write out the single glyph class names,
                # as they are used in-line as class lists.
continue
class_glyphs = " ".join(self.classesByClassName[className])
class_statement = "%s = [ %s ];" % (className, class_glyphs)
class_lines = gtextWrapper.wrap(class_statement)
for wrapped_line in class_lines:
self.writer._writeraw(wrapped_line)
self.writer.newline()
self.writer.newline()
def writeMarkClasses(self):
classNames = list(self.markClassesByClassName.keys())
if classNames:
self.writer.newline()
self.writer.write(
"# Mark Class definitions *********************************")
self.writer.newline()
for className in sorted(classNames):
classDef = self.markClassesByClassName[className]
for gnameList, anchor in classDef:
self.writer._writeraw("mark [")
class_lines = gtextWrapper.wrap(" ".join(gnameList))
num_lines = len(class_lines)
for ln, wrapped_line in enumerate(class_lines):
self.writer._writeraw(wrapped_line)
if ln < num_lines - 1:
self.writer.newline()
self.writer._writeraw("] %s %s;" % (anchor, className))
self.writer.newline()
self.writer.newline()
def doFeatures(self, featDictByLangSys):
for featTag in sorted(featDictByLangSys.keys()):
self.writer.write("feature %s {" % featTag)
self.writer.newline()
self.writer.indent()
langSysDict = featDictByLangSys[featTag]
langSysTagList = sorted(langSysDict.keys())
dflt_langsys_key = None
dfltFeatIndexList = None
for key in DFLT_LANGSYS_KEYS:
if key in langSysDict:
dfltFeatIndexList = langSysDict[key]
langSysTagList.remove(key)
dflt_langsys_key = key
break
dfltLookupIndexDict = None
if dfltFeatIndexList:
dfltLookupIndexDict = self.writeDfltLangSysFeat(
dflt_langsys_key, dfltFeatIndexList)
if dflt_langsys_key:
prevLangSysKey = dflt_langsys_key
else:
prevLangSysKey = DFLT_LANGSYS_KEYS[0]
for langSysKey in langSysTagList:
self.writeLangSysFeat(langSysKey, prevLangSysKey,
langSysDict[langSysKey],
dfltLookupIndexDict)
prevLangSysKey = langSysKey
self.writer.dedent()
self.writer.write("} %s;" % featTag)
self.writer.newline()
self.writer.newline()
def writeDfltLangSysFeat(self, langSysKey, featIndexList):
# XXX similar to writeLangSysFeat()
pfeatList = self.table.FeatureList.FeatureRecord
pLookupList = self.table.LookupList.Lookup
featRecord = None
lookupIndexDict = {}
# key: lookup index (int)
# val: fontTools.ttLib.tables.otTables.Lookup (object)
for featIndex in featIndexList:
featRecord = pfeatList[featIndex]
for lookupIndex in featRecord.Feature.LookupListIndex:
lookupIndexDict[lookupIndex] = pLookupList[lookupIndex]
excludeDFLTtxt = ""
nameIndex = 0
for li in sorted(lookupIndexDict.keys()):
self.curLookupIndex = li
lookup = pLookupList[li]
lookupFlagTxt = getLookupFlagTag(lookup.LookupFlag)
useExtension = ""
if self.showExtension and lookup.LookupType == self.ExtensionIndex:
useExtension = " useExtension"
if li in self.seenLookup:
lookupName = self.seenLookup[li]
self.writer.write("lookup %s%s;" % (
lookupName, excludeDFLTtxt))
self.writer.newline()
else:
lookupName = "%s_%s_%s_%s" % (
featRecord.FeatureTag, langSysKey[0], langSysKey[1],
nameIndex)
self.seenLookup[li] = lookupName
self.writer.write("lookup %s%s {" % (lookupName,
useExtension))
excludeDFLTtxt = "" # Only need to write it once.
self.writer.newline()
self.writer.indent()
if lookupFlagTxt:
self.writer.write("lookupflag %s;" % lookupFlagTxt)
self.writer.newline()
self.curLookupIndex = li
self.writeLookup(lookup)
self.writer.dedent()
self.writer.write("} %s;" % lookupName)
self.writer.newline()
self.writer.newline()
nameIndex += 1
return lookupIndexDict
def writeLangSysFeat(self, langSysKey, prevLangSysKey,
featIndexList, dfltLookupIndexDict):
pfeatList = self.table.FeatureList.FeatureRecord
pLookupList = self.table.LookupList.Lookup
featRecord = None
lookupIndexDict = {}
# key: lookup index (int)
# val: fontTools.ttLib.tables.otTables.Lookup (object)
for featIndex in featIndexList:
featRecord = pfeatList[featIndex]
for lookupIndex in featRecord.Feature.LookupListIndex:
lookupIndexDict[lookupIndex] = pLookupList[lookupIndex]
# Remove all lookups shared with the DFLT/dflt script.
# Note if there were any; if not, then we need to use the
# exclude keyword with the lookup.
haveAnyDflt = False
excludeDFLT = False
if dfltLookupIndexDict:
for lookupIndex in sorted(lookupIndexDict.keys()):
if lookupIndex in dfltLookupIndexDict:
del lookupIndexDict[lookupIndex]
haveAnyDflt = True
if not haveAnyDflt:
excludeDFLT = True
liList = sorted(lookupIndexDict.keys())
excludeDFLTtxt = ""
if excludeDFLT:
excludeDFLTtxt = " excludeDFLT"
            # If all the lookups were shared with DFLT dflt,
# no need to write anything.
if liList:
nameIndex = 0
if prevLangSysKey[0] != langSysKey[0]:
self.writer.write("script %s;" % (langSysKey[0]))
self.writer.newline()
self.writer.write("language %s;" % (langSysKey[1]))
self.writer.newline()
elif prevLangSysKey[1] != langSysKey[1]:
self.writer.write("language %s;" % (langSysKey[1]))
self.writer.newline()
for li in liList:
self.curLookupIndex = li
lookup = pLookupList[li]
lookupFlagTxt = getLookupFlagTag(lookup.LookupFlag)
useExtension = ""
if self.showExtension and (
lookup.LookupType == self.ExtensionIndex):
useExtension = " useExtension"
if li in self.seenLookup:
lookupName = self.seenLookup[li]
self.writer.write("lookup %s%s;" % (
lookupName, excludeDFLTtxt))
excludeDFLTtxt = "" # Only need to write it once.
self.writer.newline()
else:
lookupName = "%s_%s_%s_%s" % (
featRecord.FeatureTag, langSysKey[0].strip(),
langSysKey[1].strip(), nameIndex)
self.seenLookup[li] = lookupName
self.writer.write("lookup %s%s {" % (lookupName,
useExtension))
excludeDFLTtxt = "" # Only need to write it once.
self.writer.newline()
self.writer.indent()
if lookupFlagTxt:
self.writer.write("lookupflag %s;" % lookupFlagTxt)
self.writer.newline()
self.curLookupIndex = li
self.writeLookup(lookup)
self.writer.dedent()
self.writer.write("} %s;" % lookupName)
self.writer.newline()
self.writer.newline()
nameIndex += 1
def writeLookup(self, lookup):
lookupType = lookup.LookupType
handler = self.ruleHandlers[lookupType]
for si, sub_table in enumerate(lookup.SubTable):
self.curSubTableIndex = si
rules = handler(sub_table, self)
for rule in rules:
self.writer._writeraw(rule)
self.writer.newline()
def dumpOTLAsFeatureFile(writer, ttFont, table_tag):
otlConverter = OTLConverter(writer, ttFont, table_tag)
otlConverter.otlFeatureFormat()
class TTXNTTFont(TTFont):
def __init__(self, file=None, res_name_or_index=None,
sfntVersion="\000\001\000\000", flavor=None,
checkChecksums=False, verbose=None, recalcBBoxes=True,
allowVID=False, ignoreDecompileErrors=False,
recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=None,
supressHints=False, showExtensionFlag=False):
self.filePath = file
self.supressHints = supressHints
self.showExtensionFlag = showExtensionFlag
TTFont. __init__(self, file, res_name_or_index=res_name_or_index,
sfntVersion=sfntVersion, flavor=flavor,
checkChecksums=checkChecksums, verbose=verbose,
recalcBBoxes=recalcBBoxes, allowVID=allowVID,
ignoreDecompileErrors=ignoreDecompileErrors,
recalcTimestamp=recalcTimestamp,
fontNumber=fontNumber, lazy=lazy, quiet=quiet)
def _tableToXML(self, writer, tag, quiet=None, splitGlyphs=False):
table = None
if quiet is not None:
from fontTools.misc.loggingTools import deprecateArgument
deprecateArgument("quiet", "configure logging instead")
if tag in self:
table = self[tag]
report = "Dumping '%s' table..." % tag
else:
report = "No '%s' table found." % tag
log.info(report)
if tag not in self:
return
xmlTag = tagToXML(tag)
attrs = dict()
if hasattr(table, "ERROR"):
attrs['ERROR'] = "decompilation error"
if table.__class__ == DefaultTable:
attrs['raw'] = True
writer.begintag(xmlTag, **attrs)
writer.newline()
if tag in ("glyf", "CFF "):
dumpFont(writer, self.filePath, self.supressHints)
elif tag in ("GSUB", "GPOS"):
dumpOTLAsFeatureFile(writer, self, tag)
else:
table.toXML(writer, self)
writer.endtag(xmlTag)
writer.newline()
writer.newline()
def shellcmd(cmdList):
# In all cases, assume that cmdList does NOT specify the output file.
# I use this because tx -dump -6 can be very large.
tempPath = get_temp_file_path()
cmdList.append(tempPath)
subprocess.check_call(cmdList)
with open(tempPath, "r", encoding="utf-8") as fp:
data = fp.read()
return data
def dumpFont(writer, fontPath, supressHints=False):
dictTxt = shellcmd([TX_TOOL, "-dump", "-0", fontPath])
if curSystem == "Windows":
dictTxt = re.sub(r"[\r\n]+", "\n", dictTxt)
dictTxt = re.sub(r"##[^\r\n]*Filename[^\r\n]+", "", dictTxt, 1).strip()
dictLines = dictTxt.splitlines()
writer.begintag("FontTopDict")
writer.newline()
for line in dictLines:
writer._writeraw(line)
writer.newline()
writer.endtag("FontTopDict")
writer.newline()
if supressHints:
charData = shellcmd([TX_TOOL, "-dump", "-6", "-n", fontPath])
else:
charData = shellcmd([TX_TOOL, "-dump", "-6", fontPath])
if curSystem == "Windows":
charData = re.sub(r"[\r\n]+", "\n", charData)
charList = re.findall(r"[^ ]glyph\[[^]]+\] {([^,]+),[^\r\n]+,([^}]+)",
charData)
if "cid.CIDFontName" in dictTxt:
# fix glyph names to sort
charList = [("cid%s" % (entry[0]).zfill(5),
entry[1]) for entry in charList]
charList = sorted([entry[0] + entry[1] for entry in charList])
charTxt = "\n".join(charList)
writer.begintag("FontOutlines")
writer.newline()
for line in charTxt.splitlines():
writer._writeraw(line)
writer.newline()
writer.endtag("FontOutlines")
writer.newline()
@Timer(log, 'Done dumping TTX in %(time).3f seconds')
def ttnDump(input_file, output, options, showExtensionFlag, supressHints=False,
supressVersions=False, supressTTFDiffs=False):
log.info('Dumping "%s" to "%s"...', input_file, output)
if options.unicodedata:
from fontTools.unicode import setUnicodeData
setUnicodeData(options.unicodedata)
ttf = TTXNTTFont(input_file, 0, allowVID=options.allowVID,
ignoreDecompileErrors=options.ignoreDecompileErrors,
fontNumber=options.fontNumber, supressHints=supressHints,
showExtensionFlag=showExtensionFlag)
kDoNotDumpTagList = ["GlyphOrder", "DSIG"]
if options.onlyTables:
onlyTables = sorted(options.onlyTables)
else:
onlyTables = sorted(ttf.keys())
if options.skipTables:
for tag in options.skipTables:
if tag in onlyTables:
onlyTables.remove(tag)
for tag in kDoNotDumpTagList:
if tag in onlyTables:
onlyTables.remove(tag)
# Zero values that always differ.
if 'head' in onlyTables:
head = ttf["head"]
temp = head.created
head.created = 0
head.modified = 0
head.magicNumber = 0
head.checkSumAdjustment = 0
if supressVersions:
head.fontRevision = 0
if 'hmtx' in onlyTables:
hmtx = ttf["hmtx"]
# hmtx must be decompiled *before* we zero
# the hhea.numberOfHMetrics value
temp = hmtx.metrics
if supressTTFDiffs:
try:
del temp["CR"]
except KeyError:
pass
try:
del temp["NULL"]
except KeyError:
pass
if 'hhea' in onlyTables:
hhea = ttf["hhea"]
temp = hhea.numberOfHMetrics
hhea.numberOfHMetrics = 0
if 'vmtx' in onlyTables:
vmtx = ttf["vmtx"]
# vmtx must be decompiled *before* we zero
        # the vhea.numberOfVMetrics value
temp = vmtx.metrics
if supressTTFDiffs:
try:
del temp["CR"]
except KeyError:
pass
try:
del temp["NULL"]
except KeyError:
pass
if 'vhea' in onlyTables:
vhea = ttf["vhea"]
temp = vhea.numberOfVMetrics
vhea.numberOfVMetrics = 0
if supressVersions:
if 'name' in onlyTables:
name_table = ttf["name"]
for namerecord in name_table.names:
if namerecord.nameID == 3:
if namerecord.isUnicode():
namerecord.string = "VERSION SUPPRESSED".encode(
"utf-16be")
else:
namerecord.string = "VERSION SUPPRESSED"
elif namerecord.nameID == 5:
if namerecord.platformID == 3:
namerecord.string = namerecord.string.split(';')[0]
else:
namerecord.string = "VERSION SUPPRESSED"
if 'GDEF' in onlyTables:
GDEF = ttf["GDEF"]
gt = GDEF.table
if gt.GlyphClassDef:
gt.GlyphClassDef.Format = 0
if gt.MarkAttachClassDef:
gt.MarkAttachClassDef.Format = 0
if gt.AttachList:
if not gt.AttachList.Coverage.glyphs:
gt.AttachList = None
if gt.LigCaretList:
if not gt.LigCaretList.Coverage.glyphs:
gt.LigCaretList = None
if 'cmap' in onlyTables:
# remove mappings to notdef.
cmapTable = ttf["cmap"]
""" Force shared cmap tables to get separately decompiled.
The _c_m_a_p.py logic will decompile a cmap from source data if an
attempt is made to access a field which is not (yet) defined. Unicode
(format 4) subtables are usually identical, and thus shared. When
        initially decompiled, the first gets fully decompiled, and then the
second gets a reference to the 'cmap' dict of the first.
When entries are removed from the cmap dict of the first subtable,
that is the same as removing them from the cmap dict of the second.
However, when later an attempt is made to reference the 'nGroups'
field - which doesn't exist in format 4 - the second table gets
fully decompiled, and its cmap dict is rebuilt from the original data.
"""
for cmapSubtable in cmapTable.tables:
# if cmapSubtable.format != 4:
# continue
delList = []
for mapping in cmapSubtable.cmap.items():
if mapping[1] == ".notdef":
delList.append(mapping)
if supressTTFDiffs:
if mapping[1] == "CR":
delList.append(mapping)
if mapping[1] == "NULL":
delList.append(mapping)
if delList:
for charCode, glyphName in delList:
try:
del cmapSubtable.cmap[charCode]
except KeyError:
pass
if (cmapSubtable.format in [12, 14]) and hasattr(cmapSubtable,
"nGroups"):
cmapSubtable.nGroups = 0
if hasattr(cmapSubtable, "length"):
cmapSubtable.length = 0
if onlyTables:
ttf.saveXML(output,
tables=onlyTables,
skipTables=options.skipTables,
splitTables=options.splitTables,
splitGlyphs=options.splitGlyphs,
disassembleInstructions=options.disassembleInstructions,
bitmapGlyphDataFormat=options.bitmapGlyphDataFormat)
ttf.close()
return ttf
def main(args=None):
from fontTools import configLogger
if args is None:
args = sys.argv[1:]
if ("-h" in args) or ("-u" in args):
print(__help__)
return 0
if "-a" not in args:
args.insert(0, "-a") # allow virtual GIDS.
if "-nh" in args:
args.remove("-nh")
supressHints = True
else:
supressHints = False
if "-se" in args:
args.remove("-se")
showExtensionFlag = True
else:
showExtensionFlag = False
if "-nv" in args:
args.remove("-nv")
supressVersions = True
else:
supressVersions = False
if "-supressTTFDiffs" in args:
args.remove("-supressTTFDiffs")
supressTTFDiffs = True
else:
supressTTFDiffs = False
try:
jobs, options = ttx.parseOptions(args)
except getopt.GetoptError as e:
print("ERROR:", e, file=sys.stderr)
return 2
configLogger(level=options.logLevel)
try:
for action, input_file, output in jobs:
if action != ttx.ttDump:
log.error("ttxn can only dump font files.")
return 1
ttnDump(input_file, output, options, showExtensionFlag,
supressHints, supressVersions, supressTTFDiffs)
except SystemExit:
raise
except TTLibError as e:
log.error(e)
return 1
if __name__ == "__main__":
sys.exit(main())
| 37.78054 | 79 | 0.55878 |
894d4ccc61481c4da19fdaef449b9d798b8e7e5b | 9,916 | py | Python | venv/lib/python3.8/site-packages/hypothesis/internal/conjecture/junkdrawer.py | tausiftt5238/cs5204_election_guard | 0ca41c95e3f0f1d0146803d3308f031c57de680c | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/hypothesis/internal/conjecture/junkdrawer.py | tausiftt5238/cs5204_election_guard | 0ca41c95e3f0f1d0146803d3308f031c57de680c | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/hypothesis/internal/conjecture/junkdrawer.py | tausiftt5238/cs5204_election_guard | 0ca41c95e3f0f1d0146803d3308f031c57de680c | [
"MIT"
] | null | null | null | # This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Most of this work is copyright (C) 2013-2020 David R. MacIver
# (david@drmaciver.com), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER
"""A module for miscellaneous useful bits and bobs that don't
obviously belong anywhere else. If you spot a better home for
anything that lives here, please move it."""
import array
import sys
def array_or_list(code, contents):
if code == "O":
return list(contents)
return array.array(code, contents)
def replace_all(buffer, replacements):
"""Substitute multiple replacement values into a buffer.
Replacements is a list of (start, end, value) triples.
"""
result = bytearray()
prev = 0
offset = 0
for u, v, r in replacements:
result.extend(buffer[prev:u])
result.extend(r)
prev = v
offset += len(r) - (v - u)
result.extend(buffer[prev:])
assert len(result) == len(buffer) + offset
return bytes(result)
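# --- Illustrative usage (not part of the original module) ---
# replace_all() expects non-overlapping (start, end, value) triples sorted by
# start offset:
#     >>> replace_all(b"abcdef", [(1, 3, b"XYZ")])
#     b'aXYZdef'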
ARRAY_CODES = ["B", "H", "I", "L", "Q", "O"]
NEXT_ARRAY_CODE = dict(zip(ARRAY_CODES, ARRAY_CODES[1:]))
class IntList:
"""Class for storing a list of non-negative integers compactly.
We store them as the smallest size integer array we can get
away with. When we try to add an integer that is too large,
we upgrade the array to the smallest word size needed to store
the new value."""
__slots__ = ("__underlying",)
def __init__(self, values=()):
for code in ARRAY_CODES:
try:
self.__underlying = array_or_list(code, values)
break
except OverflowError:
pass
else: # pragma: no cover
raise AssertionError("Could not create storage for %r" % (values,))
if isinstance(self.__underlying, list):
for v in self.__underlying:
if v < 0 or not isinstance(v, int):
raise ValueError("Could not create IntList for %r" % (values,))
@classmethod
def of_length(self, n):
return IntList(array_or_list("B", [0]) * n)
def count(self, n):
return self.__underlying.count(n)
def __repr__(self):
return "IntList(%r)" % (list(self),)
def __len__(self):
return len(self.__underlying)
def __getitem__(self, i):
if isinstance(i, slice):
return IntList(self.__underlying[i])
return self.__underlying[i]
def __delitem__(self, i):
del self.__underlying[i]
def insert(self, i, v):
self.__underlying.insert(i, v)
def __iter__(self):
return iter(self.__underlying)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, IntList):
return NotImplemented
return self.__underlying == other.__underlying
def __ne__(self, other):
if self is other:
return False
if not isinstance(other, IntList):
return NotImplemented
return self.__underlying != other.__underlying
def append(self, n):
i = len(self)
self.__underlying.append(0)
self[i] = n
def __setitem__(self, i, n):
while True:
try:
self.__underlying[i] = n
return
except OverflowError:
assert n > 0
self.__upgrade()
def extend(self, ls):
for n in ls:
self.append(n)
def __upgrade(self):
code = NEXT_ARRAY_CODE[self.__underlying.typecode]
self.__underlying = array_or_list(code, self.__underlying)
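# --- Illustrative usage (not part of the original module) ---
# IntList starts with the narrowest array typecode ("B") and upgrades its
# storage in place once a stored value no longer fits:
#     >>> xs = IntList([1, 2, 3])
#     >>> xs.append(2 ** 20)
#     >>> list(xs)
#     [1, 2, 3, 1048576]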
def binary_search(lo, hi, f):
"""Binary searches in [lo , hi) to find
n such that f(n) == f(lo) but f(n + 1) != f(lo).
It is implicitly assumed and will not be checked
that f(hi) != f(lo).
"""
reference = f(lo)
while lo + 1 < hi:
mid = (lo + hi) // 2
if f(mid) == reference:
lo = mid
else:
hi = mid
return lo
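# --- Illustrative usage (not part of the original module) ---
#     >>> binary_search(0, 10, lambda n: n < 5)
#     4
# f(4) still equals f(0) while f(5) does not, matching the docstring.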
def uniform(random, n):
"""Returns a bytestring of length n, distributed uniformly at random."""
return random.getrandbits(n * 8).to_bytes(n, "big")
class LazySequenceCopy:
"""A "copy" of a sequence that works by inserting a mask in front
of the underlying sequence, so that you can mutate it without changing
the underlying sequence. Effectively behaves as if you could do list(x)
in O(1) time. The full list API is not supported yet but there's no reason
in principle it couldn't be."""
def __init__(self, values):
self.__values = values
self.__len = len(values)
self.__mask = None
def __len__(self):
return self.__len
def pop(self):
if len(self) == 0:
raise IndexError("Cannot pop from empty list")
result = self[-1]
self.__len -= 1
if self.__mask is not None:
self.__mask.pop(self.__len, None)
return result
def __getitem__(self, i):
i = self.__check_index(i)
default = self.__values[i]
if self.__mask is None:
return default
else:
return self.__mask.get(i, default)
def __setitem__(self, i, v):
i = self.__check_index(i)
if self.__mask is None:
self.__mask = {}
self.__mask[i] = v
def __check_index(self, i):
n = len(self)
if i < -n or i >= n:
raise IndexError("Index %d out of range [0, %d)" % (i, n))
if i < 0:
i += n
assert 0 <= i < n
return i
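# --- Illustrative usage (not part of the original module) ---
#     >>> base = [0, 1, 2]
#     >>> copy = LazySequenceCopy(base)
#     >>> copy[0] = 99          # stored in the mask, not in `base`
#     >>> copy[0], base[0]
#     (99, 0)
#     >>> copy.pop()
#     2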
def clamp(lower, value, upper):
"""Given a value and lower/upper bounds, 'clamp' the value so that
it satisfies lower <= value <= upper."""
return max(lower, min(value, upper))
def swap(ls, i, j):
"""Swap the elements ls[i], ls[j]."""
if i == j:
return
ls[i], ls[j] = ls[j], ls[i]
def stack_depth_of_caller():
"""Get stack size for caller's frame.
From https://stackoverflow.com/a/47956089/9297601 , this is a simple
but much faster alternative to `len(inspect.stack(0))`. We use it
with get/set recursionlimit to make stack overflows non-flaky; see
https://github.com/HypothesisWorks/hypothesis/issues/2494 for details.
"""
frame = sys._getframe(2)
size = 1
while frame:
frame = frame.f_back
size += 1
return size
def find_integer(f):
"""Finds a (hopefully large) integer such that f(n) is True and f(n + 1) is
False.
f(0) is assumed to be True and will not be checked.
"""
# We first do a linear scan over the small numbers and only start to do
# anything intelligent if f(4) is true. This is because it's very hard to
# win big when the result is small. If the result is 0 and we try 2 first
# then we've done twice as much work as we needed to!
for i in range(1, 5):
if not f(i):
return i - 1
# We now know that f(4) is true. We want to find some number for which
# f(n) is *not* true.
# lo is the largest number for which we know that f(lo) is true.
lo = 4
# Exponential probe upwards until we find some value hi such that f(hi)
# is not true. Subsequently we maintain the invariant that hi is the
# smallest number for which we know that f(hi) is not true.
hi = 5
while f(hi):
lo = hi
hi *= 2
# Now binary search until lo + 1 = hi. At that point we have f(lo) and not
    # f(lo + 1), as desired.
while lo + 1 < hi:
mid = (lo + hi) // 2
if f(mid):
lo = mid
else:
hi = mid
return lo
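# --- Illustrative usage (not part of the original module) ---
# Once the answer is at least 4, find_integer() needs only logarithmically
# many calls to f rather than a linear scan:
#     >>> find_integer(lambda n: n <= 100)
#     100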
def pop_random(random, seq):
"""Remove and return a random element of seq. This runs in O(1) but leaves
the sequence in an arbitrary order."""
i = random.randrange(0, len(seq))
swap(seq, i, len(seq) - 1)
return seq.pop()
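# --- Illustrative usage (not part of the original module), assuming
# `from random import Random`:
#     >>> seq = [10, 20, 30]
#     >>> value = pop_random(Random(0), seq)  # removed in O(1); order not kept
#     >>> len(seq)
#     2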
class NotFound(Exception):
pass
class SelfOrganisingList:
"""A self-organising list with the move-to-front heuristic.
A self-organising list is a collection which we want to retrieve items
that satisfy some predicate from. There is no faster way to do this than
a linear scan (as the predicates may be arbitrary), but the performance
of a linear scan can vary dramatically - if we happen to find a good item
on the first try it's O(1) after all. The idea of a self-organising list is
to reorder the list to try to get lucky this way as often as possible.
There are various heuristics we could use for this, and it's not clear
which are best. We use the simplest, which is that every time we find
an item we move it to the "front" (actually the back in our implementation
because we iterate in reverse) of the list.
"""
def __init__(self, values=()):
self.__values = list(values)
def __repr__(self):
return "SelfOrganisingList(%r)" % (self.__values,)
def add(self, value):
"""Add a value to this list."""
self.__values.append(value)
def find(self, condition):
"""Returns some value in this list such that ``condition(value)`` is
True. If no such value exists raises ``NotFound``."""
for i in range(len(self.__values) - 1, -1, -1):
value = self.__values[i]
if condition(value):
del self.__values[i]
self.__values.append(value)
return value
raise NotFound("No values satisfying condition")
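# --- Illustrative usage (not part of the original module) ---
#     >>> lst = SelfOrganisingList([1, 2, 3, 4])
#     >>> lst.find(lambda x: x % 2 == 1)   # reverse scan finds 3 first
#     3
#     >>> lst                              # 3 is moved to the favoured end
#     SelfOrganisingList([1, 2, 4, 3])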
| 30.231707 | 83 | 0.610226 |
aa3dc28b1e9503a31eb527729e6ed742e2759929 | 649 | py | Python | tests/test_pypkgs_dlb.py | dusty736/pypkgs_dlb | 23a4f762f53d9c43a2e05c5565ea9f83126c707a | [
"MIT"
] | null | null | null | tests/test_pypkgs_dlb.py | dusty736/pypkgs_dlb | 23a4f762f53d9c43a2e05c5565ea9f83126c707a | [
"MIT"
] | null | null | null | tests/test_pypkgs_dlb.py | dusty736/pypkgs_dlb | 23a4f762f53d9c43a2e05c5565ea9f83126c707a | [
"MIT"
] | null | null | null | from pypkgs_dlb import __version__
from pypkgs_dlb import pypkgs_dlb
import pandas as pd
def test_version():
assert __version__ == "0.1.0"
def test_catbind():
a = pd.Categorical(["character", "hits", "your", "eyeballs"])
b = pd.Categorical(["but", "integer", "where it", "counts"])
assert ((pypkgs_dlb.catbind(a, b)).codes == [1, 4, 7, 3, 0, 5, 6, 2]).all()
assert (
(pypkgs_dlb.catbind(a, b)).categories
== [
"but",
"character",
"counts",
"eyeballs",
"hits",
"integer",
"where it",
"your",
]
).all()
| 24.037037 | 79 | 0.508475 |
b0c25d9663e1ddad0da358df81146bea73aab857 | 6,148 | py | Python | src/streamlink/plugins/ustvnow.py | gromit1811/streamlink | ad95081e894316e154800cecf1c7b5e7062535f3 | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/ustvnow.py | gromit1811/streamlink | ad95081e894316e154800cecf1c7b5e7062535f3 | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/ustvnow.py | gromit1811/streamlink | ad95081e894316e154800cecf1c7b5e7062535f3 | [
"BSD-2-Clause"
] | null | null | null | import base64
import json
import logging
import re
from urllib.parse import urljoin, urlparse
from uuid import uuid4
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto.Util.Padding import pad, unpad
from streamlink import PluginError
from streamlink.plugin import Plugin, PluginArguments, PluginArgument
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
class USTVNow(Plugin):
_url_re = re.compile(r"https?://(?:www\.)?ustvnow\.com/live/(?P<scode>\w+)/-(?P<id>\d+)")
_main_js_re = re.compile(r"""src=['"](main\..*\.js)['"]""")
_enc_key_re = re.compile(r'(?P<key>AES_(?:Key|IV))\s*:\s*"(?P<value>[^"]+)"')
TENANT_CODE = "ustvnow"
_api_url = "https://teleupapi.revlet.net/service/api/v1/"
_token_url = _api_url + "get/token"
_signin_url = "https://www.ustvnow.com/signin"
arguments = PluginArguments(
PluginArgument(
"username",
metavar="USERNAME",
required=True,
help="Your USTV Now account username"
),
PluginArgument(
"password",
sensitive=True,
metavar="PASSWORD",
required=True,
help="Your USTV Now account password",
prompt="Enter USTV Now account password"
)
)
def __init__(self, url):
super(USTVNow, self).__init__(url)
self._encryption_config = {}
self._token = None
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
@classmethod
def encrypt_data(cls, data, key, iv):
rkey = "".join(reversed(key)).encode('utf8')
riv = "".join(reversed(iv)).encode('utf8')
fkey = SHA256.new(rkey).hexdigest()[:32].encode("utf8")
cipher = AES.new(fkey, AES.MODE_CBC, riv)
encrypted = cipher.encrypt(pad(data, 16, 'pkcs7'))
return base64.b64encode(encrypted)
@classmethod
def decrypt_data(cls, data, key, iv):
rkey = "".join(reversed(key)).encode('utf8')
riv = "".join(reversed(iv)).encode('utf8')
fkey = SHA256.new(rkey).hexdigest()[:32].encode("utf8")
cipher = AES.new(fkey, AES.MODE_CBC, riv)
decrypted = cipher.decrypt(base64.b64decode(data))
if decrypted:
return unpad(decrypted, 16, 'pkcs7')
else:
return decrypted
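    # --- Illustrative round trip (not part of the original plugin; the key
    # and IV below are made-up 16-character values) ---
    # Both halves reverse the key/IV strings, derive a 32-byte AES key from
    # SHA256 of the reversed key, and use CBC with PKCS#7 padding, so:
    #     key, iv = "0123456789abcdef", "fedcba9876543210"
    #     token = USTVNow.encrypt_data(b"secret", key, iv)
    #     USTVNow.decrypt_data(token, key, iv)  # -> b"secret"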
def _get_encryption_config(self, url):
# find the path to the main.js
# load the main.js and extract the config
if not self._encryption_config:
res = self.session.http.get(url)
m = self._main_js_re.search(res.text)
main_js_path = m and m.group(1)
if main_js_path:
res = self.session.http.get(urljoin(url, main_js_path))
self._encryption_config = dict(self._enc_key_re.findall(res.text))
return self._encryption_config.get("AES_Key"), self._encryption_config.get("AES_IV")
@property
def box_id(self):
if not self.cache.get("box_id"):
self.cache.set("box_id", str(uuid4()))
return self.cache.get("box_id")
def get_token(self):
"""
Get the token for USTVNow
:return: a valid token
"""
if not self._token:
log.debug("Getting new session token")
res = self.session.http.get(self._token_url, params={
"tenant_code": self.TENANT_CODE,
"box_id": self.box_id,
"product": self.TENANT_CODE,
"device_id": 5,
"display_lang_code": "ENG",
"device_sub_type": "",
"timezone": "UTC"
})
data = res.json()
if data['status']:
self._token = data['response']['sessionId']
log.debug("New token: {}".format(self._token))
else:
log.error("Token acquisition failed: {details} ({detail})".format(**data['error']))
raise PluginError("could not obtain token")
return self._token
def api_request(self, path, data, metadata=None):
key, iv = self._get_encryption_config(self._signin_url)
post_data = {
"data": self.encrypt_data(json.dumps(data).encode('utf8'), key, iv).decode("utf8"),
"metadata": self.encrypt_data(json.dumps(metadata).encode('utf8'), key, iv).decode("utf8")
}
headers = {"box-id": self.box_id,
"session-id": self.get_token(),
"tenant-code": self.TENANT_CODE,
"content-type": "application/json"}
res = self.session.http.post(self._api_url + path, data=json.dumps(post_data), headers=headers).json()
data = dict((k, v and json.loads(self.decrypt_data(v, key, iv)))for k, v in res.items())
return data
def login(self, username, password):
log.debug("Trying to login...")
resp = self.api_request(
"send",
{
"login_id": username,
"login_key": password,
"login_mode": "1",
"manufacturer": "123"
},
{"request": "signin"}
)
return resp['data']['status']
def _get_streams(self):
"""
Finds the streams from ustvnow.com.
"""
if self.login(self.get_option("username"), self.get_option("password")):
path = urlparse(self.url).path.strip("/")
resp = self.api_request("send", {"path": path}, {"request": "page/stream"})
if resp['data']['status']:
for stream in resp['data']['response']['streams']:
if stream['keys']['licenseKey']:
log.warning("Stream possibly protected by DRM")
yield from HLSStream.parse_variant_playlist(self.session, stream['url']).items()
else:
log.error("Could not find any streams: {code}: {message}".format(**resp['data']['error']))
else:
log.error("Failed to login, check username and password")
__plugin__ = USTVNow
| 35.131429 | 110 | 0.568315 |
c1252619abd11d0cd04fc397531e68836dd3b73d | 13,154 | py | Python | launcher/mac_tray.py | TDUncle/XX-Net | 24b2af60dc0abc1c26211813064bb14c1e22bac8 | [
"BSD-2-Clause"
] | 1 | 2016-03-28T03:27:59.000Z | 2016-03-28T03:27:59.000Z | launcher/mac_tray.py | TDUncle/XX-Net | 24b2af60dc0abc1c26211813064bb14c1e22bac8 | [
"BSD-2-Clause"
] | null | null | null | launcher/mac_tray.py | TDUncle/XX-Net | 24b2af60dc0abc1c26211813064bb14c1e22bac8 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# coding:utf-8
import os
import sys
import config
current_path = os.path.dirname(os.path.abspath(__file__))
helper_path = os.path.join(current_path, os.pardir, 'data', 'launcher', 'helper')
if __name__ == "__main__":
python_path = os.path.abspath( os.path.join(current_path, os.pardir, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
osx_lib = os.path.join(python_path, 'lib', 'darwin')
sys.path.append(osx_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/PyObjC"
sys.path.append(extra_lib)
import config
import module_init
import subprocess
import webbrowser
from AppKit import *
from SystemConfiguration import *
from instances import xlog
from PyObjCTools import AppHelper
class MacTrayObject(NSObject):
def __init__(self):
pass
def applicationDidFinishLaunching_(self, notification):
setupHelper()
self.setupUI()
self.registerObserver()
def setupUI(self):
self.statusbar = NSStatusBar.systemStatusBar()
self.statusitem = self.statusbar.statusItemWithLength_(NSSquareStatusItemLength) #NSSquareStatusItemLength #NSVariableStatusItemLength
# Set initial image icon
icon_path = os.path.join(current_path, "web_ui", "favicon-mac.ico")
image = NSImage.alloc().initByReferencingFile_(icon_path)
image.setScalesWhenResized_(True)
image.setSize_((20, 20))
self.statusitem.setImage_(image)
# Let it highlight upon clicking
self.statusitem.setHighlightMode_(1)
self.statusitem.setToolTip_("XX-Net")
# Get current selected mode
proxyState = getProxyState(currentService)
# Build a very simple menu
self.menu = NSMenu.alloc().initWithTitle_('XX-Net')
menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Config', 'config:', '')
self.menu.addItem_(menuitem)
menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(getCurrentServiceMenuItemTitle(), None, '')
self.menu.addItem_(menuitem)
self.currentServiceMenuItem = menuitem
menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Auto GAEProxy', 'enableAutoProxy:', '')
if proxyState == 'pac':
menuitem.setState_(NSOnState)
self.menu.addItem_(menuitem)
self.autoGaeProxyMenuItem = menuitem
menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Global GAEProxy', 'enableGlobalProxy:', '')
if proxyState == 'gae':
menuitem.setState_(NSOnState)
self.menu.addItem_(menuitem)
self.globalGaeProxyMenuItem = menuitem
menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Disable GAEProxy', 'disableProxy:', '')
if proxyState == 'disable':
menuitem.setState_(NSOnState)
self.menu.addItem_(menuitem)
self.disableGaeProxyMenuItem = menuitem
# Reset Menu Item
menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Reload GAEProxy', 'resetGoagent:', '')
self.menu.addItem_(menuitem)
# Default event
menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'windowWillClose:', '')
self.menu.addItem_(menuitem)
# Bind it to the status item
self.statusitem.setMenu_(self.menu)
# Hide dock icon
NSApp.setActivationPolicy_(NSApplicationActivationPolicyProhibited)
def updateStatusBarMenu(self):
self.currentServiceMenuItem.setTitle_(getCurrentServiceMenuItemTitle())
# Remove Tick before All Menu Items
self.autoGaeProxyMenuItem.setState_(NSOffState)
self.globalGaeProxyMenuItem.setState_(NSOffState)
self.disableGaeProxyMenuItem.setState_(NSOffState)
# Get current selected mode
proxyState = getProxyState(currentService)
# Update Tick before Menu Item
if proxyState == 'pac':
self.autoGaeProxyMenuItem.setState_(NSOnState)
elif proxyState == 'gae':
self.globalGaeProxyMenuItem.setState_(NSOnState)
elif proxyState == 'disable':
self.disableGaeProxyMenuItem.setState_(NSOnState)
# Trigger autovalidation
self.menu.update()
def validateMenuItem_(self, menuItem):
return currentService or (menuItem != self.autoGaeProxyMenuItem and
menuItem != self.globalGaeProxyMenuItem and
menuItem != self.disableGaeProxyMenuItem)
def presentAlert_withTitle_(self, msg, title):
self.performSelectorOnMainThread_withObject_waitUntilDone_('presentAlertWithInfo:', [title, msg], True)
return self.alertReturn
def presentAlertWithInfo_(self, info):
alert = NSAlert.alloc().init()
alert.setMessageText_(info[0])
alert.setInformativeText_(info[1])
alert.addButtonWithTitle_("OK")
alert.addButtonWithTitle_("Cancel")
self.alertReturn = alert.runModal() == NSAlertFirstButtonReturn
def registerObserver(self):
nc = NSWorkspace.sharedWorkspace().notificationCenter()
nc.addObserver_selector_name_object_(self, 'windowWillClose:', NSWorkspaceWillPowerOffNotification, None)
def windowWillClose_(self, notification):
executeResult = subprocess.check_output(['networksetup', '-listallnetworkservices'])
services = executeResult.split('\n')
services = filter(lambda service : service and service.find('*') == -1 and getProxyState(service) != 'disable', services) # Remove disabled services and empty lines
if len(services) > 0:
try:
map(helperDisableAutoProxy, services)
map(helperDisableGlobalProxy, services)
except:
disableAutoProxyCommand = ';'.join(map(getDisableAutoProxyCommand, services))
disableGlobalProxyCommand = ';'.join(map(getDisableGlobalProxyCommand, services))
rootCommand = """osascript -e 'do shell script "%s;%s" with administrator privileges' """ % (disableAutoProxyCommand, disableGlobalProxyCommand)
executeCommand = rootCommand.encode('utf-8')
xlog.info("try disable proxy:%s", executeCommand)
os.system(executeCommand)
module_init.stop_all()
os._exit(0)
NSApp.terminate_(self)
def config_(self, notification):
host_port = config.get(["modules", "launcher", "control_port"], 8085)
webbrowser.open_new("http://127.0.0.1:%s/" % host_port)
def resetGoagent_(self, _):
module_init.stop("gae_proxy")
module_init.start("gae_proxy")
def enableAutoProxy_(self, _):
try:
helperDisableGlobalProxy(currentService)
helperEnableAutoProxy(currentService)
except:
disableGlobalProxyCommand = getDisableGlobalProxyCommand(currentService)
enableAutoProxyCommand = getEnableAutoProxyCommand(currentService)
rootCommand = """osascript -e 'do shell script "%s;%s" with administrator privileges' """ % (disableGlobalProxyCommand, enableAutoProxyCommand)
executeCommand = rootCommand.encode('utf-8')
xlog.info("try enable auto proxy:%s", executeCommand)
os.system(executeCommand)
self.updateStatusBarMenu()
def enableGlobalProxy_(self, _):
try:
helperDisableAutoProxy(currentService)
helperEnableGlobalProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
enableGlobalProxyCommand = getEnableGlobalProxyCommand(currentService)
rootCommand = """osascript -e 'do shell script "%s;%s" with administrator privileges' """ % (disableAutoProxyCommand, enableGlobalProxyCommand)
executeCommand = rootCommand.encode('utf-8')
xlog.info("try enable global proxy:%s", executeCommand)
os.system(executeCommand)
self.updateStatusBarMenu()
def disableProxy_(self, _):
try:
helperDisableAutoProxy(currentService)
helperDisableGlobalProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
disableGlobalProxyCommand = getDisableGlobalProxyCommand(currentService)
rootCommand = """osascript -e 'do shell script "%s;%s" with administrator privileges' """ % (disableAutoProxyCommand, disableGlobalProxyCommand)
executeCommand = rootCommand.encode('utf-8')
xlog.info("try disable proxy:%s", executeCommand)
os.system(executeCommand)
self.updateStatusBarMenu()
def setupHelper():
try:
with open(os.devnull) as devnull:
subprocess.check_call(helper_path, stderr=devnull)
except:
cpCommand = "cp \\\"%s\\\" \\\"%s\\\"" % (os.path.join(current_path, 'mac_helper'), helper_path)
chmodCommand = "chmod 4777 \\\"%s\\\"" % helper_path
chownCommand = "chown root \\\"%s\\\"" % helper_path
rootCommand = """osascript -e 'do shell script "%s;%s;%s" with administrator privileges' """ % (cpCommand, chmodCommand, chownCommand)
executeCommand = rootCommand.encode('utf-8')
xlog.info("try setup helper:%s", executeCommand)
os.system(executeCommand)
def getCurrentServiceMenuItemTitle():
if currentService:
return 'Connection: %s' % currentService
else:
return 'Connection: None'
def getProxyState(service):
if not service:
return
# Check if auto proxy is enabled
executeResult = subprocess.check_output(['networksetup', '-getautoproxyurl', service])
if ( executeResult.find('http://127.0.0.1:8086/proxy.pac\nEnabled: Yes') != -1 ):
return "pac"
# Check if global proxy is enabled
executeResult = subprocess.check_output(['networksetup', '-getwebproxy', service])
if ( executeResult.find('Enabled: Yes\nServer: 127.0.0.1\nPort: 8087') != -1 ):
return "gae"
return "disable"
# Generate commands for Apple Script
def getEnableAutoProxyCommand(service):
return "networksetup -setautoproxyurl \\\"%s\\\" \\\"http://127.0.0.1:8086/proxy.pac\\\"" % service
def getDisableAutoProxyCommand(service):
return "networksetup -setautoproxystate \\\"%s\\\" off" % service
def getEnableGlobalProxyCommand(service):
enableHttpProxyCommand = "networksetup -setwebproxy \\\"%s\\\" 127.0.0.1 8087" % service
enableHttpsProxyCommand = "networksetup -setsecurewebproxy \\\"%s\\\" 127.0.0.1 8087" % service
return "%s;%s" % (enableHttpProxyCommand, enableHttpsProxyCommand)
def getDisableGlobalProxyCommand(service):
disableHttpProxyCommand = "networksetup -setwebproxystate \\\"%s\\\" off" % service
disableHttpsProxyCommand = "networksetup -setsecurewebproxystate \\\"%s\\\" off" % service
return "%s;%s" % (disableHttpProxyCommand, disableHttpsProxyCommand)
# Call helper
def helperEnableAutoProxy(service):
subprocess.check_call([helper_path, 'enableauto', service, 'http://127.0.0.1:8086/proxy.pac'])
def helperDisableAutoProxy(service):
subprocess.check_call([helper_path, 'disableauto', service])
def helperEnableGlobalProxy(service):
subprocess.check_call([helper_path, 'enablehttp', service, '127.0.0.1', '8087'])
subprocess.check_call([helper_path, 'enablehttps', service, '127.0.0.1', '8087'])
def helperDisableGlobalProxy(service):
subprocess.check_call([helper_path, 'disablehttp', service])
subprocess.check_call([helper_path, 'disablehttps', service])
sys_tray = MacTrayObject.alloc().init()
currentService = None
def fetchCurrentService(protocol):
global currentService
status = SCDynamicStoreCopyValue(None, "State:/Network/Global/" + protocol)
if not status:
currentService = None
return
serviceID = status['PrimaryService']
service = SCDynamicStoreCopyValue(None, "Setup:/Network/Service/" + serviceID)
if not service:
currentService = None
return
currentService = service['UserDefinedName']
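# --- Illustrative note (not part of the original file) ---
# fetchCurrentService('IPv4') resolves the active network service through the
# SystemConfiguration dynamic store:
#     State:/Network/Global/IPv4   -> PrimaryService id
#     Setup:/Network/Service/<id>  -> UserDefinedName (e.g. "Wi-Fi")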
@objc.callbackFor(CFNotificationCenterAddObserver)
def networkChanged(center, observer, name, object, userInfo):
fetchCurrentService('IPv4')
sys_tray.updateStatusBarMenu()
# Note: the following code can't run in class
def serve_forever():
app = NSApplication.sharedApplication()
app.setDelegate_(sys_tray)
# Listen for network change
nc = CFNotificationCenterGetDarwinNotifyCenter()
CFNotificationCenterAddObserver(nc, None, networkChanged, "com.apple.system.config.network_change", None, CFNotificationSuspensionBehaviorDeliverImmediately)
fetchCurrentService('IPv4')
AppHelper.runEventLoop()
def main():
serve_forever()
if __name__ == '__main__':
main()
| 41.10625 | 174 | 0.681694 |
8aaeeea38f1574567acad214e22060048d2ed112 | 7,118 | py | Python | troveclient/openstack/common/apiclient/auth.py | cp16net/python-troveclient | 502f7113a832ae8f0ffcf44bf7cc8ea1fbc4877b | [
"Apache-2.0"
] | null | null | null | troveclient/openstack/common/apiclient/auth.py | cp16net/python-troveclient | 502f7113a832ae8f0ffcf44bf7cc8ea1fbc4877b | [
"Apache-2.0"
] | null | null | null | troveclient/openstack/common/apiclient/auth.py | cp16net/python-troveclient | 502f7113a832ae8f0ffcf44bf7cc8ea1fbc4877b | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Spanish National Research Council.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# E0202: An attribute inherited from %s hide this method
# pylint: disable=E0202
import abc
import argparse
import logging
import os
from stevedore import extension
from troveclient.openstack.common.apiclient import exceptions
logger = logging.getLogger(__name__)
_discovered_plugins = {}
def discover_auth_systems():
"""Discover the available auth-systems.
This won't take into account the old style auth-systems.
"""
global _discovered_plugins
_discovered_plugins = {}
def add_plugin(ext):
_discovered_plugins[ext.name] = ext.plugin
ep_namespace = "troveclient.openstack.common.apiclient.auth"
mgr = extension.ExtensionManager(ep_namespace)
mgr.map(add_plugin)
def load_auth_system_opts(parser):
"""Load options needed by the available auth-systems into a parser.
This function will try to populate the parser with options from the
available plugins.
"""
group = parser.add_argument_group("Common auth options")
BaseAuthPlugin.add_common_opts(group)
for name, auth_plugin in _discovered_plugins.iteritems():
group = parser.add_argument_group(
"Auth-system '%s' options" % name,
conflict_handler="resolve")
auth_plugin.add_opts(group)
def load_plugin(auth_system):
try:
plugin_class = _discovered_plugins[auth_system]
except KeyError:
raise exceptions.AuthSystemNotFound(auth_system)
return plugin_class(auth_system=auth_system)
def load_plugin_from_args(args):
"""Load requred plugin and populate it with options.
Try to guess auth system if it is not specified. Systems are tried in
alphabetical order.
:type args: argparse.Namespace
:raises: AuthorizationFailure
"""
auth_system = args.os_auth_system
if auth_system:
plugin = load_plugin(auth_system)
plugin.parse_opts(args)
plugin.sufficient_options()
return plugin
for plugin_auth_system in sorted(_discovered_plugins.iterkeys()):
plugin_class = _discovered_plugins[plugin_auth_system]
plugin = plugin_class()
plugin.parse_opts(args)
try:
plugin.sufficient_options()
except exceptions.AuthPluginOptionsMissing:
continue
return plugin
raise exceptions.AuthPluginOptionsMissing(["auth_system"])
class BaseAuthPlugin(object):
"""Base class for authentication plugins.
An authentication plugin needs to override at least the authenticate
method to be a valid plugin.
"""
__metaclass__ = abc.ABCMeta
auth_system = None
opt_names = []
common_opt_names = [
"auth_system",
"username",
"password",
"tenant_name",
"token",
"auth_url",
]
def __init__(self, auth_system=None, **kwargs):
self.auth_system = auth_system or self.auth_system
self.opts = dict((name, kwargs.get(name))
for name in self.opt_names)
@staticmethod
def _parser_add_opt(parser, opt):
"""Add an option to parser in two variants.
:param opt: option name (with underscores)
"""
dashed_opt = opt.replace("_", "-")
env_var = "OS_%s" % opt.upper()
arg_default = os.environ.get(env_var, "")
arg_help = "Defaults to env[%s]." % env_var
parser.add_argument(
"--os-%s" % dashed_opt,
metavar="<%s>" % dashed_opt,
default=arg_default,
help=arg_help)
parser.add_argument(
"--os_%s" % opt,
metavar="<%s>" % dashed_opt,
help=argparse.SUPPRESS)
@classmethod
def add_opts(cls, parser):
"""Populate the parser with the options for this plugin.
"""
for opt in cls.opt_names:
# use `BaseAuthPlugin.common_opt_names` since it is never
# changed in child classes
if opt not in BaseAuthPlugin.common_opt_names:
cls._parser_add_opt(parser, opt)
@classmethod
def add_common_opts(cls, parser):
"""Add options that are common for several plugins.
"""
for opt in cls.common_opt_names:
cls._parser_add_opt(parser, opt)
@staticmethod
def get_opt(opt_name, args):
"""Return option name and value.
:param opt_name: name of the option, e.g., "username"
:param args: parsed arguments
"""
return (opt_name, getattr(args, "os_%s" % opt_name, None))
def parse_opts(self, args):
"""Parse the actual auth-system options if any.
This method is expected to populate the attribute `self.opts` with a
dict containing the options and values needed to make authentication.
"""
self.opts.update(dict(self.get_opt(opt_name, args)
for opt_name in self.opt_names))
def authenticate(self, http_client):
"""Authenticate using plugin defined method.
The method usually analyses `self.opts` and performs
a request to authentication server.
:param http_client: client object that needs authentication
:type http_client: HTTPClient
:raises: AuthorizationFailure
"""
self.sufficient_options()
self._do_authenticate(http_client)
@abc.abstractmethod
def _do_authenticate(self, http_client):
"""Protected method for authentication.
"""
def sufficient_options(self):
"""Check if all required options are present.
:raises: AuthPluginOptionsMissing
"""
missing = [opt
for opt in self.opt_names
if not self.opts.get(opt)]
if missing:
raise exceptions.AuthPluginOptionsMissing(missing)
@abc.abstractmethod
def token_and_endpoint(self, endpoint_type, service_type):
"""Return token and endpoint.
:param service_type: Service type of the endpoint
:type service_type: string
:param endpoint_type: Type of endpoint.
Possible values: public or publicURL,
internal or internalURL,
admin or adminURL
:type endpoint_type: string
:returns: tuple of token and endpoint strings
:raises: EndpointException
"""
| 31.219298 | 78 | 0.649621 |
a33bb7124233e7163ef7d2e540d8d3220f8247c9 | 574 | py | Python | config/celery_app.py | Swianka/router-map | fc338624633f001cffa29021e3b696f89b7c501f | [
"MIT"
] | 3 | 2022-02-10T10:20:21.000Z | 2022-02-10T10:20:36.000Z | config/celery_app.py | Swianka/router-map | fc338624633f001cffa29021e3b696f89b7c501f | [
"MIT"
] | 7 | 2020-05-19T13:41:10.000Z | 2021-09-27T10:15:16.000Z | config/celery_app.py | Swianka/router-map | fc338624633f001cffa29021e3b696f89b7c501f | [
"MIT"
] | 2 | 2019-10-02T10:06:44.000Z | 2019-10-03T08:02:07.000Z | import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
app = Celery("router-map")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
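# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file):
# autodiscover_tasks() above looks for a `tasks.py` module in every installed
# Django app. Such a module might look like the sketch below; the app name
# and task body are assumptions for the example only.
#   # map/tasks.py
#   from celery import shared_task
#   @shared_task
#   def refresh_routers():
#       """Example background job picked up by the Celery worker."""
#       ...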
| 33.764706 | 72 | 0.78223 |
a0ee8704a65df4af3fec9d3ad0698de0de7009f5 | 317 | py | Python | code/api/health.py | CiscoSecurity/tr-05-serverless-rsa-netwitness | f9d2fe554efceede3c06bcee40062405b1f971d5 | [
"MIT"
] | null | null | null | code/api/health.py | CiscoSecurity/tr-05-serverless-rsa-netwitness | f9d2fe554efceede3c06bcee40062405b1f971d5 | [
"MIT"
] | null | null | null | code/api/health.py | CiscoSecurity/tr-05-serverless-rsa-netwitness | f9d2fe554efceede3c06bcee40062405b1f971d5 | [
"MIT"
] | null | null | null | from flask import Blueprint
from api.utils import jsonify_data, get_node_info, get_credentials
health_api = Blueprint('health', __name__)
@health_api.route('/health', methods=['POST'])
def health():
credentials = get_credentials()
_ = get_node_info(credentials)
return jsonify_data({'status': 'ok'})
| 22.642857 | 66 | 0.735016 |
d0ad769075ff87984bc6a6b4117cc8a136307219 | 2,438 | py | Python | setup.py | cjwatson/python-openid | 557dc2eac99b29feda2c7f207f7ad6cbe90dde09 | [
"Apache-2.0"
] | null | null | null | setup.py | cjwatson/python-openid | 557dc2eac99b29feda2c7f207f7ad6cbe90dde09 | [
"Apache-2.0"
] | null | null | null | setup.py | cjwatson/python-openid | 557dc2eac99b29feda2c7f207f7ad6cbe90dde09 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
if 'sdist' in sys.argv:
os.system('./admin/makedoc')
# Import version from openid library itself
VERSION = __import__('openid').__version__
INSTALL_REQUIRES = [
'six',
'cryptography',
'lxml;platform_python_implementation=="CPython"',
'lxml <4.0;platform_python_implementation=="PyPy"',
]
EXTRAS_REQUIRE = {
'quality': ('flake8', 'isort'),
'tests': ('mock', 'testfixtures', 'responses', 'coverage'),
# Optional dependencies for fetchers
'httplib2': ('httplib2', ),
'pycurl': ('pycurl', ),
'requests': ('requests', ),
# Dependencies for Django example
'djopenid': ('django<1.11.99', ),
}
LONG_DESCRIPTION = open('README.md').read() + '\n\n' + open('Changelog.md').read()
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Systems Administration :: Authentication/Directory',
]
setup(
name='python-openid2',
version=VERSION,
description='Python OpenID library - OpenID support for servers and consumers.',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/ziima/python-openid',
packages=['openid',
'openid.consumer',
'openid.server',
'openid.store',
'openid.yadis',
'openid.extensions',
'openid.extensions.draft',
],
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
# license specified by classifier.
# license=getLicense(),
author='Vlastimil Zíma',
author_email='vlastimil.zima@gmail.com',
classifiers=CLASSIFIERS,
)
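# ---------------------------------------------------------------------------
# Editor's usage note (not part of the original file): the `extras_require`
# groups above map to pip "extras". Assuming the package is published under
# the name given to setup(), optional dependencies can be pulled in with e.g.:
#   pip install "python-openid2[requests]"
#   pip install "python-openid2[tests,quality]"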
| 33.39726 | 84 | 0.631665 |
ed823982b3671bbdf139afc6df0213684419cb51 | 2,023 | py | Python | tests/graphql/stack_overflow/graphene_python_list_resolve_null_for_all_fields/tests.py | karlosss/simple_api | 03f87035c648f161d5e7a59b24f4e04bd34399f1 | [
"MIT"
] | 2 | 2020-11-13T14:00:06.000Z | 2020-12-19T11:50:22.000Z | tests/graphql/stack_overflow/graphene_python_list_resolve_null_for_all_fields/tests.py | karlosss/simple_api | 03f87035c648f161d5e7a59b24f4e04bd34399f1 | [
"MIT"
] | 5 | 2021-02-04T14:27:43.000Z | 2021-06-04T23:22:24.000Z | tests/graphql/stack_overflow/graphene_python_list_resolve_null_for_all_fields/tests.py | karlosss/simple_api | 03f87035c648f161d5e7a59b24f4e04bd34399f1 | [
"MIT"
] | 1 | 2021-01-06T13:54:38.000Z | 2021-01-06T13:54:38.000Z | from .objects import schema
from tests.graphql.graphql_test_utils import remove_ws, GraphQLTestCase
class Test(GraphQLTestCase):
GRAPHQL_SCHEMA = schema
REF_GRAPHQL_SCHEMA = """
schema {
query: Query
}
type ActionInfo {
name: String!
permitted: Boolean!
deny_reason: String
retry_in: Duration
}
scalar Duration
type ObjectInfo {
name: String!
pk_field: String
actions: [ActionInfo!]!
}
type Query {
users: [User!]!
__objects: [ObjectInfo!]!
__actions: [ActionInfo!]!
}
type User {
id: Int
username: String
email: String
__actions: [ActionInfo!]!
}
"""
REF_META_SCHEMA = {
"data": {
"__objects": [
{
"name": "User",
"pk_field": None,
"actions": []
}
],
"__actions": [
{
"name": "users",
"permitted": True,
"deny_reason": None,
"retry_in": None
}
]
}
}
def test_request(self):
resp = self.query(
"""
query{
users{
id
username
email
}
}
"""
)
exp = {
"data": {
"users": [
{
"id": 39330,
"username": "RCraig",
"email": "WRussell@dolor.gov"
},
{
"id": 39331,
"username": "AHohmann",
"email": "AMarina@sapien.com"
}
]
}
}
self.assertResponseNoErrors(resp)
self.assertJSONEqual(resp.content, exp)
| 21.752688 | 71 | 0.371231 |
c9e2ecf85fd9cf01aa8adccae64ae7d4280ab2a9 | 3,332 | py | Python | train.py | icoxfog417/tying-wv-and-wc | 6975adbe7bbc90e9f838271f9233e2810d0cef70 | [
"MIT"
] | 44 | 2017-06-13T12:33:27.000Z | 2021-06-09T11:54:54.000Z | train.py | icoxfog417/tying-wv-and-wc | 6975adbe7bbc90e9f838271f9233e2810d0cef70 | [
"MIT"
] | 4 | 2017-08-01T14:41:27.000Z | 2018-11-19T08:55:35.000Z | train.py | icoxfog417/tying-wv-and-wc | 6975adbe7bbc90e9f838271f9233e2810d0cef70 | [
"MIT"
] | 8 | 2017-06-16T21:01:15.000Z | 2018-12-27T04:01:56.000Z | import os
import argparse
import numpy as np
from model.one_hot_model import OneHotModel
from model.augmented_model import AugmentedModel
from model.data_processor import DataProcessor
from model.setting import ProposedSetting
DATA_ROOT = os.path.join(os.path.dirname(__file__), "data")
LOG_ROOT = os.path.join(os.path.dirname(__file__), "log")
MODEL_ROOT = os.path.join(os.path.dirname(__file__), "trained_model")
def prepare_dataset(dataset_kind):
dp = DataProcessor()
if dataset_kind == "ptb":
dataset = dp.get_ptb(DATA_ROOT, vocab_size=10000)
else:
dataset = dp.get_wiki2(DATA_ROOT, vocab_size=30000)
return dataset
def train_baseline(network_size, dataset_kind, epochs=40, stride=0):
# prepare the data
setting = ProposedSetting(network_size, dataset_kind)
dataset = prepare_dataset(dataset_kind)
vocab_size = len(dataset.vocab_data())
sequence_size = 20
dp = DataProcessor()
train_steps, train_generator = dp.make_batch_iter(dataset, sequence_size=sequence_size, stride=stride)
valid_steps, valid_generator = dp.make_batch_iter(dataset, kind="valid", sequence_size=sequence_size, stride=stride)
# make one hot model
model = OneHotModel(vocab_size, sequence_size, setting, LOG_ROOT)
model.compile()
model.fit_generator(train_generator, train_steps, valid_generator, valid_steps, epochs=epochs)
model.save(MODEL_ROOT)
def train_augmented(network_size, dataset_kind, tying=False, epochs=40, stride=0):
# prepare the data
setting = ProposedSetting(network_size, dataset_kind)
dataset = prepare_dataset(dataset_kind)
vocab_size = len(dataset.vocab_data())
sequence_size = 20
dp = DataProcessor()
train_steps, train_generator = dp.make_batch_iter(dataset, sequence_size=sequence_size, stride=stride)
valid_steps, valid_generator = dp.make_batch_iter(dataset, kind="valid", sequence_size=sequence_size, stride=stride)
# make one hot model
model = AugmentedModel(vocab_size, sequence_size, setting, tying=tying, checkpoint_path=LOG_ROOT)
model.compile()
model.fit_generator(train_generator, train_steps, valid_generator, valid_steps, epochs=epochs)
model.save(MODEL_ROOT)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Experiment")
parser.add_argument("--aug", action="store_const", const=True, default=False,
help="use augmented model")
parser.add_argument("--tying", action="store_const", const=True, default=False,
help="use tying model")
parser.add_argument("--nsize", default="small", help="network size (small, medium, large)")
parser.add_argument("--dataset", default="ptb", help="dataset kind (ptb or wiki2)")
parser.add_argument("--epochs", type=int, default=40, help="epoch to train")
parser.add_argument("--stride", type=int, default=0, help="stride of the sequence")
args = parser.parse_args()
n_size = args.nsize
dataset = args.dataset
if not os.path.exists(LOG_ROOT):
os.mkdir(LOG_ROOT)
if args.aug or args.tying:
print("Use Augmented Model (tying={})".format(args.tying))
train_augmented(n_size, dataset, args.tying, args.epochs, args.stride)
else:
train_baseline(n_size, dataset, args.epochs, args.stride)
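# ---------------------------------------------------------------------------
# Editor's usage note (not part of the original file): example invocations of
# the command-line interface defined above (the file name train.py and the
# exact values are assumptions for the example only):
#   python train.py --nsize small --dataset ptb --epochs 40
#   python train.py --aug --tying --nsize medium --dataset wiki2 --stride 10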
| 39.666667 | 120 | 0.727791 |
7d4e90f7c7b116389ad004efc9253f6234f619b4 | 398 | py | Python | codedvlogs/codedvlogs/wsgi.py | codedskills/django | 0724f5c862ea6b3710edf0715292ccf1a67ac3b3 | [
"MIT"
] | null | null | null | codedvlogs/codedvlogs/wsgi.py | codedskills/django | 0724f5c862ea6b3710edf0715292ccf1a67ac3b3 | [
"MIT"
] | null | null | null | codedvlogs/codedvlogs/wsgi.py | codedskills/django | 0724f5c862ea6b3710edf0715292ccf1a67ac3b3 | [
"MIT"
] | null | null | null | """
WSGI config for codedvlogs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "codedvlogs.settings")
application = get_wsgi_application()
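# ---------------------------------------------------------------------------
# Editor's usage note (not part of the original file): a WSGI server can be
# pointed at the `application` object above, e.g. (assuming gunicorn is
# installed):
#   gunicorn codedvlogs.wsgi:application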
| 23.411765 | 78 | 0.788945 |
043a351fcd457c110b4c1af453771bbe0ee53d5c | 687 | py | Python | example/tests/test_example1_etl.py | elau1004/ETLite | 8d3eff4963e98029636f76476ff028f16736b17e | [
"MIT"
] | 1 | 2020-02-06T04:35:31.000Z | 2020-02-06T04:35:31.000Z | example/tests/test_example1_etl.py | elau1004/ETLite | 8d3eff4963e98029636f76476ff028f16736b17e | [
"MIT"
] | 20 | 2019-12-16T05:20:24.000Z | 2021-06-02T00:54:51.000Z | example/tests/test_example1_etl.py | elau1004/ETLite | 8d3eff4963e98029636f76476ff028f16736b17e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Edward Lau <elau1004@netscape.net>
# Licensed under the MIT License.
#
import unittest
from etlite.common.base_etl import BaseEtl
from etlite.common.base_restapi_etl import BaseRestApiEtl
from example.base_example_restapi_etl import BaseExampleRestApiEtl
from example.example1_etl import Example1Etl
from tests.base_test import BaseTest
class TestEtlite( BaseTest ):
def test_inheritance( self ):
s = Example1Etl()
self.assertIsInstance( s ,BaseEtl )
self.assertIsInstance( s ,BaseRestApiEtl )
self.assertIsInstance( s ,BaseExampleRestApiEtl )
if "__main__" == __name__:
unittest.main()
| 25.444444 | 67 | 0.739447 |
ca03aacdb749e5b5f431145f46d64555f1774b88 | 3,085 | py | Python | venv/Lib/site-packages/PyInstaller/__init__.py | johnarn/price_observer_bot | 3e2fa54dd2217e43eef862b13c28e4afbe13ebff | [
"MIT"
] | 4 | 2019-08-28T21:01:08.000Z | 2021-06-30T06:27:35.000Z | venv/Lib/site-packages/PyInstaller/__init__.py | johnarn/price_observer_bot | 3e2fa54dd2217e43eef862b13c28e4afbe13ebff | [
"MIT"
] | 5 | 2019-11-10T16:20:09.000Z | 2019-12-02T14:23:58.000Z | venv/Lib/site-packages/PyInstaller/__init__.py | johnarn/price_observer_bot | 3e2fa54dd2217e43eef862b13c28e4afbe13ebff | [
"MIT"
] | 2 | 2019-08-27T22:21:05.000Z | 2021-06-30T06:27:41.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2005-2019, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
__all__ = ('HOMEPATH', 'PLATFORM', '__version__')
import os
import sys
from . import compat
from .compat import is_win, is_py2
from .utils.git import get_repo_revision
# Note: Keep this variable as plain string so it could be updated automatically
# when doing a release.
__version__ = '3.5'
# Absolute path of this package's directory. Save this early so all
# submodules can use the absolute path. This is required e.g. if the
# current directory changes prior to loading the hooks.
PACKAGEPATH = os.path.abspath(os.path.dirname(__file__))
HOMEPATH = os.path.dirname(PACKAGEPATH)
if is_win and is_py2:
# This ensures for Python 2 that PyInstaller will work on Windows
# with paths containing foreign characters.
try:
unicode(HOMEPATH)
except UnicodeDecodeError:
        # Convert to ShortPathName only when HOMEPATH is not ASCII-only -
        # the conversion to unicode above raises this UnicodeDecodeError.
try:
HOMEPATH = compat.win32api.GetShortPathName(HOMEPATH)
except ImportError:
pass
# Update __version__ as necessary.
if os.path.exists(os.path.join(HOMEPATH, 'setup.py')):
# PyInstaller is run directly from source without installation or
# __version__ is called from 'setup.py' ...
if compat.getenv('PYINSTALLER_DO_RELEASE') == '1':
# Suppress the git revision when doing a release.
pass
elif 'sdist' not in sys.argv:
# and 'setup.py' was not called with 'sdist' argument.
# For creating source tarball we do not want git revision
# in the filename.
try:
__version__ += get_repo_revision()
except Exception:
# Write to stderr because stdout is used for eval() statement
# in some subprocesses.
sys.stderr.write('WARN: failed to parse git revision')
else:
# PyInstaller was installed by `python setup.py install'.
import pkg_resources
__version__ = pkg_resources.get_distribution('PyInstaller').version
## Default values of paths where to put files created by PyInstaller.
## Mind the option-help in build_main when changing these.
# Folder where to put created .spec file.
DEFAULT_SPECPATH = compat.getcwd()
# Where to put the final app.
DEFAULT_DISTPATH = os.path.join(compat.getcwd(), 'dist')
# Where to put all the temporary work files, .log, .pyz and etc.
DEFAULT_WORKPATH = os.path.join(compat.getcwd(), 'build')
PLATFORM = compat.system + '-' + compat.architecture
# Include machine name in path to bootloader for some machines.
# e.g. 'arm'
if compat.machine:
PLATFORM += '-' + compat.machine
| 36.294118 | 79 | 0.672609 |
ca6315d1530621f01c9a4f8b09db0e0bf207b2d8 | 461 | py | Python | Scripts/python/scripts mundo 1/JOGO CURSO EM VIDEO/exercicio017.py | BrenoNAlmeida/Scripts-Escola | 20d886d0401ef7f40a4a46e307eadbf5b1c0a5eb | [
"Apache-2.0"
] | null | null | null | Scripts/python/scripts mundo 1/JOGO CURSO EM VIDEO/exercicio017.py | BrenoNAlmeida/Scripts-Escola | 20d886d0401ef7f40a4a46e307eadbf5b1c0a5eb | [
"Apache-2.0"
] | null | null | null | Scripts/python/scripts mundo 1/JOGO CURSO EM VIDEO/exercicio017.py | BrenoNAlmeida/Scripts-Escola | 20d886d0401ef7f40a4a46e307eadbf5b1c0a5eb | [
"Apache-2.0"
] | null | null | null | from math import hypot
co = float(input('Length of the opposite leg: '))
ca = float(input('Length of the adjacent leg: '))
hi = hypot(co, ca)
print('The hypotenuse will measure {:.2f}'.format(hi))
opcao4 = ''
while opcao4 != 'S' and opcao4 != 'N':
    opcao4 = str(input('Do you want to run again [S/N]? ')).upper()[0]
    if opcao4 == 'S':
        import JOGO
    if opcao4 == 'N':
        print('Thank you for using our services')
        break
| 32.928571 | 75 | 0.629067 |
ba3ac2aee5d63ad969a4ead42f13611c1c51da8f | 954 | py | Python | analysis/Python_Scripts/runSpacy.py | J-Wengler/NLP_Paper | b5f1952e04172289adfe935297a1fcfd42e2bf70 | [
"CC-BY-4.0",
"CC0-1.0"
] | 1 | 2021-11-04T11:03:35.000Z | 2021-11-04T11:03:35.000Z | analysis/Python_Scripts/runSpacy.py | J-Wengler/NLP_Paper | b5f1952e04172289adfe935297a1fcfd42e2bf70 | [
"CC-BY-4.0",
"CC0-1.0"
] | null | null | null | analysis/Python_Scripts/runSpacy.py | J-Wengler/NLP_Paper | b5f1952e04172289adfe935297a1fcfd42e2bf70 | [
"CC-BY-4.0",
"CC0-1.0"
] | null | null | null | from helper import *
import multiprocessing
import spacy
import sys
import time
numKeywords = int(sys.argv[1])
vectorSize = int(sys.argv[2])
maxCandidateArticles = int(sys.argv[3])
reducedSet = str(sys.argv[4])
printTimestamp("Getting candidate articles")
if reducedSet=='true':
candidate_articles = getCandidateArticles(maxCandidateArticles, True)
else:
candidate_articles = getCandidateArticles(maxCandidateArticles, False)
printTimestamp("Loading Spacy")
model = spacy.load("en_core_web_lg")
start = time.time()
for query in range(6,7):
for keywordExtractor in ["TopicRank", "TfIdf", "KPMiner", "YAKE", "TextRank", "SingleRank", "TopicalPageRank", "PositionRank", "MultipartiteRank"]:
mp = multiprocessing.Process(target=findSimilarity, args=(keywordExtractor, "SpaCy", model, candidate_articles, query, numKeywords, vectorSize))
mp.start()
#mp.join()
end = time.time()
print('{:.4f} s'.format(end - start))
| 29.8125 | 152 | 0.733753 |
30c792f22226838886f4f18b1257bb3231509f9c | 5,739 | py | Python | lib/Mako-0.4.1/mako/filters.py | AniX/webapp-improved | a6bd7d4a98642ce0f708e7d53c66f70f168d02f6 | [
"Apache-2.0"
] | 15 | 2015-01-18T17:30:31.000Z | 2019-10-25T17:14:41.000Z | lib/Mako-0.4.1/mako/filters.py | AniX/webapp-improved | a6bd7d4a98642ce0f708e7d53c66f70f168d02f6 | [
"Apache-2.0"
] | 1 | 2015-09-30T03:15:36.000Z | 2015-09-30T03:15:36.000Z | lib/Mako-0.4.1/mako/filters.py | AniX/webapp-improved | a6bd7d4a98642ce0f708e7d53c66f70f168d02f6 | [
"Apache-2.0"
] | 2 | 2015-06-17T23:01:13.000Z | 2015-07-08T23:10:19.000Z | # mako/filters.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, urllib, htmlentitydefs, codecs
from StringIO import StringIO
from mako import util
xml_escapes = {
'&' : '&',
'>' : '>',
'<' : '<',
'"' : '"', # also " in html-only
"'" : ''' # also ' in html-only
}
# XXX: " is valid in HTML and XML
# ' is not valid HTML, but is valid XML
def legacy_html_escape(string):
"""legacy HTML escape for non-unicode mode."""
return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
try:
import markupsafe
html_escape = markupsafe.escape
except ImportError:
html_escape = legacy_html_escape
def xml_escape(string):
return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
def url_escape(string):
# convert into a list of octets
string = string.encode("utf8")
return urllib.quote_plus(string)
def url_unescape(string):
text = urllib.unquote_plus(string)
if not is_ascii_str(text):
text = text.decode("utf8")
return text
def trim(string):
return string.strip()
class Decode(object):
def __getattr__(self, key):
def decode(x):
if isinstance(x, unicode):
return x
elif not isinstance(x, str):
return unicode(str(x), encoding=key)
else:
return unicode(x, encoding=key)
return decode
decode = Decode()
_ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z')
def is_ascii_str(text):
return isinstance(text, str) and _ASCII_re.match(text)
################################################################
class XMLEntityEscaper(object):
def __init__(self, codepoint2name, name2codepoint):
self.codepoint2entity = dict([(c, u'&%s;' % n)
for c,n in codepoint2name.iteritems()])
self.name2codepoint = name2codepoint
def escape_entities(self, text):
"""Replace characters with their character entity references.
Only characters corresponding to a named entity are replaced.
"""
return unicode(text).translate(self.codepoint2entity)
def __escape(self, m):
codepoint = ord(m.group())
try:
return self.codepoint2entity[codepoint]
except (KeyError, IndexError):
return '&#x%X;' % codepoint
__escapable = re.compile(r'["&<>]|[^\x00-\x7f]')
def escape(self, text):
"""Replace characters with their character references.
Replace characters by their named entity references.
Non-ASCII characters, if they do not have a named entity reference,
are replaced by numerical character references.
The return value is guaranteed to be ASCII.
"""
return self.__escapable.sub(self.__escape, unicode(text)
).encode('ascii')
# XXX: This regexp will not match all valid XML entity names__.
    # (It punts on details involving CombiningChars and Extenders.)
#
# .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef
__characterrefs = re.compile(r'''& (?:
\#(\d+)
| \#x([\da-f]+)
| ( (?!\d) [:\w] [-.:\w]+ )
) ;''',
re.X | re.UNICODE)
def __unescape(self, m):
dval, hval, name = m.groups()
if dval:
codepoint = int(dval)
elif hval:
codepoint = int(hval, 16)
else:
codepoint = self.name2codepoint.get(name, 0xfffd)
# U+FFFD = "REPLACEMENT CHARACTER"
if codepoint < 128:
return chr(codepoint)
return unichr(codepoint)
def unescape(self, text):
"""Unescape character references.
All character references (both entity references and numerical
character references) are unescaped.
"""
return self.__characterrefs.sub(self.__unescape, text)
_html_entities_escaper = XMLEntityEscaper(htmlentitydefs.codepoint2name,
htmlentitydefs.name2codepoint)
html_entities_escape = _html_entities_escaper.escape_entities
html_entities_unescape = _html_entities_escaper.unescape
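# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the two
# helpers above convert between characters and named HTML entities, e.g.
#   >>> html_entities_escape(u'M\xfcller & Sons')
#   u'M&uuml;ller &amp; Sons'
#   >>> html_entities_unescape(u'M&uuml;ller &amp; Sons')
#   u'M\xfcller & Sons'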
def htmlentityreplace_errors(ex):
"""An encoding error handler.
This python `codecs`_ error handler replaces unencodable
characters with HTML entities, or, if no HTML entity exists for
the character, XML character references.
>>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace')
'The cost was €12.'
"""
if isinstance(ex, UnicodeEncodeError):
# Handle encoding errors
bad_text = ex.object[ex.start:ex.end]
text = _html_entities_escaper.escape(bad_text)
return (unicode(text), ex.end)
raise ex
codecs.register_error('htmlentityreplace', htmlentityreplace_errors)
# TODO: options to make this dynamic per-compilation will be added in a later release
DEFAULT_ESCAPES = {
'x':'filters.xml_escape',
'h':'filters.html_escape',
'u':'filters.url_escape',
'trim':'filters.trim',
'entity':'filters.html_entities_escape',
'unicode':'unicode',
'decode':'decode',
'str':'str',
'n':'n'
}
if util.py3k:
DEFAULT_ESCAPES.update({
'unicode':'str'
})
NON_UNICODE_ESCAPES = DEFAULT_ESCAPES.copy()
NON_UNICODE_ESCAPES['h'] = 'filters.legacy_html_escape'
| 30.365079 | 85 | 0.603067 |
1b0e9315d08a15b070f1d3eee622cb42355584c8 | 8,041 | py | Python | faculty/config.py | facultyai/faculty | a5eb7bc55343ed47b94125100077123695443035 | [
"Apache-2.0"
] | 11 | 2019-02-08T11:11:24.000Z | 2022-02-05T13:30:01.000Z | faculty/config.py | imrehg/faculty | 147c45d7d07dfeaee661b190a3f54937d3eeda87 | [
"Apache-2.0"
] | 70 | 2019-02-05T16:15:24.000Z | 2022-02-23T07:27:28.000Z | faculty/config.py | imrehg/faculty | 147c45d7d07dfeaee661b190a3f54937d3eeda87 | [
"Apache-2.0"
] | 6 | 2019-08-04T10:30:07.000Z | 2021-12-26T14:57:23.000Z | # Copyright 2018-2021 Faculty Science Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Load library configuration from files, environment variables or code.
"""
import os
import warnings
from collections import namedtuple
from configparser import ConfigParser, NoSectionError, NoOptionError
Profile = namedtuple(
"Profile", ["domain", "protocol", "client_id", "client_secret"]
)
DEFAULT_PROFILE = "default"
DEFAULT_DOMAIN = "services.cloud.my.faculty.ai"
DEFAULT_PROTOCOL = "https"
def load(path):
"""Read the Faculty configuration from a file.
Parameters
----------
path : str or pathlib.Path
The path of the file to load configuration from.
Returns
-------
Dict[str, Profile]
The profiles loaded from the file, keyed by their names.
"""
parser = ConfigParser()
parser.read(str(path))
def _get(section, option):
try:
return parser.get(section, option)
except (NoSectionError, NoOptionError):
return None
profiles = {}
for section in parser.sections():
profiles[section] = Profile(
domain=_get(section, "domain"),
protocol=_get(section, "protocol"),
client_id=_get(section, "client_id"),
client_secret=_get(section, "client_secret"),
)
return profiles
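# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): a
# credentials file read by load() is a plain INI file with one section per
# profile; the values below are placeholders, not real credentials.
#   [default]
#   domain = services.cloud.my.faculty.ai
#   protocol = https
#   client_id = 00000000-0000-0000-0000-000000000000
#   client_secret = not-a-real-secret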
def load_profile(path, profile):
"""Read a single profile from a file.
Parameters
----------
path : str or pathlib.Path
The path of the file to load the profile from.
profile : str
The name of the profile to load.
Returns
-------
Profile
The loaded profile. If the requested profile is not present, an empty
profile (with all None values) is returned.
"""
profiles = load(path)
try:
return profiles[profile]
except KeyError:
return Profile(None, None, None, None)
def _default_credentials_path():
xdg_config_home = os.environ.get("XDG_CONFIG_HOME")
if not xdg_config_home:
xdg_config_home = os.path.expanduser("~/.config")
default_path = os.path.join(xdg_config_home, "faculty", "credentials")
legacy_path = os.path.join(xdg_config_home, "sherlockml", "credentials")
if not os.path.exists(default_path) and os.path.exists(legacy_path):
template = (
"Reading credentials from {legacy_path}. Credentials at this path "
"are deprecated - please migrate by moving them to {default_path}."
)
warnings.warn(
template.format(legacy_path=legacy_path, default_path=default_path)
)
return legacy_path
else:
return default_path
def _get_deprecated_env_var(key, expected_key):
value = os.getenv(key)
if value:
template = (
"The environment variable {key} is deprecated. "
"Please migrate by using {expected_key}."
)
warnings.warn(template.format(key=key, expected_key=expected_key))
return value
def resolve_credentials_path(credentials_path=None):
"""Determine which credentials file to load.
    This function implements the order of precedence in which the path of the
    credentials file can be configured. This order is (highest priority first):
* The path passed to this function
* The environment variable ``FACULTY_CREDENTIALS_PATH``
* ``~/.config/faculty/credentials``
The last path will be relative to the XDG home directory, when this is
configured.
"""
return (
credentials_path
or os.getenv("FACULTY_CREDENTIALS_PATH")
or _get_deprecated_env_var(
"SHERLOCKML_CREDENTIALS_PATH", "FACULTY_CREDENTIALS_PATH"
)
or _default_credentials_path()
)
class CredentialsError(RuntimeError):
"""An error was encourntered when loading Faculty credentials."""
pass
def _raise_credentials_error(type_):
raise CredentialsError("No {} found".format(type_))
def resolve_profile(
credentials_path=None,
profile_name=None,
domain=None,
protocol=None,
client_id=None,
client_secret=None,
):
"""Resolve all sources of configuration to load a Faculty profile.
    This function implements the order of precedence in which configuration
entries are determined from files, environment variables and code.
Configuration entries are determined in this order of priority (highest
first):
* The value passed to this function
* The value set in an environment variable
* The value read from a configuration file
The logic for determining the configuration file is described in
:func:`resolve_credentials_path`.
The profile read from the configuration file will be, in order of priority:
* The value passed to this function
* The value set in the environment variable ``FACULTY_PROFILE``
* ``default``
Parameters
----------
credentials_path : str or pathlib.Path, optional
The path of the credentials file to load. Can also be set with the
environment variable ``FACULTY_CREDENTIALS_PATH``.
profile_name : str, optional
The name of the profile to load from the credentials file. Can also be
set with the environment variable ``FACULTY_PROFILE``.
domain : str, optional
The domain name where Faculty services are hosted. Can also be set with
the environment variable ``FACULTY_DOMAIN``.
protocol : str, optional
The protocol to use when making requests to Faculty services. Can also
be set with the environment variable ``FACULTY_PROTOCOL``.
client_id : str, optional
The OAuth client ID to authenticate requests with. Can also be set with
the environment variable ``FACULTY_CLIENT_ID``.
client_secret : str, optional
The OAuth client secret to authenticate requests with. Can also be set
with the environment variable ``FACULTY_CLIENT_SECRET``.
Returns
-------
Profile
The resolved Faculty profile.
"""
resolved_profile_name = (
profile_name
or os.getenv("FACULTY_PROFILE")
or _get_deprecated_env_var("SHERLOCKML_PROFILE", "FACULTY_PROFILE")
or DEFAULT_PROFILE
)
profile = load_profile(
resolve_credentials_path(credentials_path), resolved_profile_name
)
resolved_domain = (
domain
or os.getenv("FACULTY_DOMAIN")
or _get_deprecated_env_var("SHERLOCKML_DOMAIN", "FACULTY_DOMAIN")
or profile.domain
or DEFAULT_DOMAIN
)
resolved_protocol = (
protocol
or os.getenv("FACULTY_PROTOCOL")
or _get_deprecated_env_var("SHERLOCKML_PROTOCOL", "FACULTY_PROTOCOL")
or profile.protocol
or DEFAULT_PROTOCOL
)
resolved_client_id = (
client_id
or os.getenv("FACULTY_CLIENT_ID")
or _get_deprecated_env_var("SHERLOCKML_CLIENT_ID", "FACULTY_CLIENT_ID")
or profile.client_id
or _raise_credentials_error("client_id")
)
resolved_client_secret = (
client_secret
or os.getenv("FACULTY_CLIENT_SECRET")
or _get_deprecated_env_var(
"SHERLOCKML_CLIENT_SECRET", "FACULTY_CLIENT_SECRET"
)
or profile.client_secret
or _raise_credentials_error("client_secret")
)
return Profile(
domain=resolved_domain,
protocol=resolved_protocol,
client_id=resolved_client_id,
client_secret=resolved_client_secret,
)
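# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): typical use
# of the resolution logic above; the profile name is an assumption for the
# example only, and environment variables or the credentials file supply the
# remaining values.
#   profile = resolve_profile(profile_name="default")
#   print(profile.domain, profile.client_id)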
| 29.781481 | 79 | 0.678149 |
eb33b53469f0d82b0d4550ea0557433dc7d3db3c | 3,210 | py | Python | google/ads/googleads/v6/errors/types/query_error.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/errors/types/query_error.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/errors/types/query_error.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.errors",
marshal="google.ads.googleads.v6",
manifest={"QueryErrorEnum",},
)
class QueryErrorEnum(proto.Message):
r"""Container for enum describing possible query errors."""
class QueryError(proto.Enum):
r"""Enum describing possible query errors."""
UNSPECIFIED = 0
UNKNOWN = 1
QUERY_ERROR = 50
BAD_ENUM_CONSTANT = 18
BAD_ESCAPE_SEQUENCE = 7
BAD_FIELD_NAME = 12
BAD_LIMIT_VALUE = 15
BAD_NUMBER = 5
BAD_OPERATOR = 3
BAD_PARAMETER_NAME = 61
BAD_PARAMETER_VALUE = 62
BAD_RESOURCE_TYPE_IN_FROM_CLAUSE = 45
BAD_SYMBOL = 2
BAD_VALUE = 4
DATE_RANGE_TOO_WIDE = 36
DATE_RANGE_TOO_NARROW = 60
EXPECTED_AND = 30
EXPECTED_BY = 14
EXPECTED_DIMENSION_FIELD_IN_SELECT_CLAUSE = 37
EXPECTED_FILTERS_ON_DATE_RANGE = 55
EXPECTED_FROM = 44
EXPECTED_LIST = 41
EXPECTED_REFERENCED_FIELD_IN_SELECT_CLAUSE = 16
EXPECTED_SELECT = 13
EXPECTED_SINGLE_VALUE = 42
EXPECTED_VALUE_WITH_BETWEEN_OPERATOR = 29
INVALID_DATE_FORMAT = 38
INVALID_STRING_VALUE = 57
INVALID_VALUE_WITH_BETWEEN_OPERATOR = 26
INVALID_VALUE_WITH_DURING_OPERATOR = 22
INVALID_VALUE_WITH_LIKE_OPERATOR = 56
OPERATOR_FIELD_MISMATCH = 35
PROHIBITED_EMPTY_LIST_IN_CONDITION = 28
PROHIBITED_ENUM_CONSTANT = 54
PROHIBITED_FIELD_COMBINATION_IN_SELECT_CLAUSE = 31
PROHIBITED_FIELD_IN_ORDER_BY_CLAUSE = 40
PROHIBITED_FIELD_IN_SELECT_CLAUSE = 23
PROHIBITED_FIELD_IN_WHERE_CLAUSE = 24
PROHIBITED_RESOURCE_TYPE_IN_FROM_CLAUSE = 43
PROHIBITED_RESOURCE_TYPE_IN_SELECT_CLAUSE = 48
PROHIBITED_RESOURCE_TYPE_IN_WHERE_CLAUSE = 58
PROHIBITED_METRIC_IN_SELECT_OR_WHERE_CLAUSE = 49
PROHIBITED_SEGMENT_IN_SELECT_OR_WHERE_CLAUSE = 51
PROHIBITED_SEGMENT_WITH_METRIC_IN_SELECT_OR_WHERE_CLAUSE = 53
LIMIT_VALUE_TOO_LOW = 25
PROHIBITED_NEWLINE_IN_STRING = 8
PROHIBITED_VALUE_COMBINATION_IN_LIST = 10
PROHIBITED_VALUE_COMBINATION_WITH_BETWEEN_OPERATOR = 21
STRING_NOT_TERMINATED = 6
TOO_MANY_SEGMENTS = 34
UNEXPECTED_END_OF_QUERY = 9
UNEXPECTED_FROM_CLAUSE = 47
UNRECOGNIZED_FIELD = 32
UNEXPECTED_INPUT = 11
REQUESTED_METRICS_FOR_MANAGER = 59
FILTER_HAS_TOO_MANY_VALUES = 63
__all__ = tuple(sorted(__protobuf__.manifest))
| 34.891304 | 74 | 0.704361 |
938f8b4f88dc76626aa225f65c44f8c248eae26c | 2,312 | py | Python | samples/openapi3/client/petstore/python-experimental/petstore_api/model/danish_pig.py | mariotoffia/openapi-generator | 878f6e5709e20f496f04cc4184ef7cf715e9c61f | [
"Apache-2.0"
] | null | null | null | samples/openapi3/client/petstore/python-experimental/petstore_api/model/danish_pig.py | mariotoffia/openapi-generator | 878f6e5709e20f496f04cc4184ef7cf715e9c61f | [
"Apache-2.0"
] | 1 | 2022-03-30T16:31:07.000Z | 2022-03-30T16:31:07.000Z | samples/openapi3/client/petstore/python-experimental/petstore_api/model/danish_pig.py | yields-io/openapi-generator | 949b4e2008421e4abdae17ac5d3cf07c600ee1e0 | [
"Apache-2.0"
] | 1 | 2022-02-06T21:14:46.000Z | 2022-02-06T21:14:46.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
InstantiationMetadata,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class DanishPig(
DictSchema
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
_required_property_names = set((
'className',
))
class className(
_SchemaEnumMaker(
enum_value_to_name={
"DanishPig": "DANISHPIG",
}
),
StrSchema
):
@classmethod
@property
def DANISHPIG(cls):
return cls._enum_by_value["DanishPig"]("DanishPig")
def __new__(
cls,
*args: typing.Union[dict, frozendict, ],
className: className,
_instantiation_metadata: typing.Optional[InstantiationMetadata] = None,
**kwargs: typing.Type[Schema],
) -> 'DanishPig':
return super().__new__(
cls,
*args,
className=className,
_instantiation_metadata=_instantiation_metadata,
**kwargs,
)
| 21.407407 | 174 | 0.620675 |
5780fb3c4ed155b2271a46363dff65281309c5f0 | 1,751 | py | Python | vbb_backend/session/api/viewsets/sessionMentor.py | patrickb42/backend-vbb-portal | 88362bc5b4d5cab95aa67e12694f98371604b65a | [
"MIT"
] | 3 | 2021-04-14T02:59:09.000Z | 2021-06-08T00:17:27.000Z | vbb_backend/session/api/viewsets/sessionMentor.py | patrickb42/backend-vbb-portal | 88362bc5b4d5cab95aa67e12694f98371604b65a | [
"MIT"
] | 81 | 2020-12-08T00:11:52.000Z | 2021-08-09T18:13:32.000Z | vbb_backend/session/api/viewsets/sessionMentor.py | patrickb42/backend-vbb-portal | 88362bc5b4d5cab95aa67e12694f98371604b65a | [
"MIT"
] | 5 | 2021-01-12T04:50:26.000Z | 2021-06-04T02:00:03.000Z | from rest_framework.exceptions import PermissionDenied
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, UpdateModelMixin
from vbb_backend.session.api.serializers.sessionMentor import (
MentorSessionListSerializer,
)
from vbb_backend.session.models import Session
from vbb_backend.session.models import MentorSessionAssociation
from vbb_backend.users.models import UserTypeEnum
class MentorSessionViewSet(
ListModelMixin, RetrieveModelMixin, UpdateModelMixin, GenericViewSet
):
queryset = MentorSessionAssociation.objects.all()
permission_classes = [IsAuthenticated, ]
serializer_class = MentorSessionListSerializer
lookup_field = "external_id"
def get_queryset(self):
queryset = self.queryset
user = self.request.user
session = Session.objects.get(
external_id=self.kwargs.get("session_external_id")
)
queryset = queryset.filter(session=session)
if user.is_superuser:
pass
elif user.user_type == UserTypeEnum.HEADMASTER.value:
queryset = queryset.filter(
session__computer__program__program_director=user
)
elif user.user_type == UserTypeEnum.MENTOR.value:
queryset = queryset.filter(mentors__mentor__user__in=user)
elif user.user_type == UserTypeEnum.STUDENT.value and self.action in [
"list",
"retrieve",
]:
queryset = queryset.filter(students__student__user__in=user)
else:
raise PermissionDenied()
return queryset
| 38.065217 | 86 | 0.727584 |
b1767cf46839f79f659115c31d8268645ca1653c | 3,608 | py | Python | backend/app/models/user.py | lorneez/mini-amazon | 0b707cec7e8e704fa40c537f39274ba4a50e6c36 | [
"MIT"
] | 2 | 2021-10-21T02:30:38.000Z | 2021-10-21T22:17:15.000Z | backend/app/models/user.py | lorneez/mini-amazon | 0b707cec7e8e704fa40c537f39274ba4a50e6c36 | [
"MIT"
] | null | null | null | backend/app/models/user.py | lorneez/mini-amazon | 0b707cec7e8e704fa40c537f39274ba4a50e6c36 | [
"MIT"
] | null | null | null | from flask import current_app as app
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
import datetime
from flask_login import UserMixin
class User(UserMixin):
def __init__(self, id, email, name, is_seller, balance, address):
self.id = id
self.email = email
self.name = name
self.is_seller = is_seller
self.balance = balance
self.address = address
@staticmethod
def get_by_auth(email, password):
rows = app.db.execute("""
SELECT password, id, is_seller
FROM Users
WHERE email = :email
""",
email=email)
if not rows: # email not found
print("here1")
return None
elif check_password_hash(rows[0][0], password):
            # correct password; return the user's id and seller flag
print("here2")
return [rows[0][1], rows[0][2]]
else:
print("here3")
return None
@staticmethod
def encode_auth_token(user_id):
# Generates the Auth Token
# return: encoded jwt token and datetime object of expiration
try:
expiration = datetime.datetime.utcnow() + datetime.timedelta(days=0, hours=5)
payload = {
'exp': expiration,
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
token = jwt.encode(
payload,
app.config.get('SECRET_KEY'),
algorithm='HS256'
)
return [token, expiration]
except Exception as e:
return e
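    # -----------------------------------------------------------------------
    # Editor's illustrative sketch (not part of the original model): a
    # possible counterpart to encode_auth_token() above. decode_auth_token is
    # an assumption for the example only; it relies on PyJWT's jwt.decode()
    # and exception classes and returns the user id stored in the 'sub' claim.
    #   @staticmethod
    #   def decode_auth_token(token):
    #       try:
    #           payload = jwt.decode(
    #               token, app.config.get('SECRET_KEY'), algorithms=['HS256']
    #           )
    #           return payload['sub']
    #       except jwt.ExpiredSignatureError:
    #           return None  # token has expired
    #       except jwt.InvalidTokenError:
    #           return None  # token is malformed or was tampered with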
@staticmethod
def email_exists(email):
rows = app.db.execute("""
SELECT email
FROM Users
WHERE email = :email
""",
email=email)
return len(rows) > 0
@staticmethod
def register(email, password, name, address, is_seller=False):
try:
rows = app.db.execute("""
INSERT INTO Users(email, password, name, is_seller, balance, address)
VALUES(:email, :password, :name, :is_seller, :balance, :address)
RETURNING id
""",
email = email,
password = generate_password_hash(password),
name = name,
is_seller = is_seller,
balance = 0,
address = address)
id = rows[0][0]
return User.get(id)
except Exception:
# likely email already in use; better error checking and
# reporting needed
return None
@staticmethod
def get(id):
rows = app.db.execute("""
SELECT id, email, name, is_seller, balance, address
FROM Users
WHERE id = :id
""",
id=id)
return User(*(rows[0])) if rows else None
@staticmethod
def change_balance(id, difference):
try:
rows = app.db.execute('''
UPDATE Users
SET balance = balance+:difference
WHERE id=:id
RETURNING :id
''', difference=difference, id=id)
if rows is None:
return None
return rows[0][0]
except Exception:
return None
@staticmethod
def get_balance(uid):
try:
rows = app.db.execute('''
SELECT balance
FROM Users
WHERE id=:uid
''', uid=uid)
if rows is None:
return None
return rows[0][0]
except Exception:
return None | 29.333333 | 89 | 0.520787 |
b71657e9024e8c90c05741e5aa9f35c8be68a689 | 4,086 | py | Python | models/base_gru_net.py | se122811/3D-R2N2-PyTorch | 18ceddbd5b1cf7d58cf56388f48337a1220b2a98 | [
"MIT"
] | 46 | 2018-07-04T10:07:48.000Z | 2021-09-02T01:41:38.000Z | models/base_gru_net.py | ssbnsk99/3D-R2N2-PyTorch | 18ceddbd5b1cf7d58cf56388f48337a1220b2a98 | [
"MIT"
] | 12 | 2019-01-07T02:10:16.000Z | 2021-06-04T02:39:29.000Z | models/base_gru_net.py | ssbnsk99/3D-R2N2-PyTorch | 18ceddbd5b1cf7d58cf56388f48337a1220b2a98 | [
"MIT"
] | 13 | 2019-05-30T15:55:14.000Z | 2021-02-18T07:51:33.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 21:04:40 2018
@author: wangchu
"""
from models.net import Net
from lib.layers import SoftmaxWithLoss3D
import torch
from torch.autograd import Variable
##########################################################################################
#                                                                                        #
#                            GRUNet definition using PyTorch                            #
#                                                                                        #
##########################################################################################
class BaseGRUNet(Net):
"""
This class is used to define some common attributes and methods that both GRUNet and
ResidualGRUNet have. Note that GRUNet and ResidualGRUNet have the same loss function
    and forward pass. They differ only in their encoder and decoder architectures.
"""
def __init__(self):
print("initializing \"BaseGRUNet\"")
super(BaseGRUNet, self).__init__()
"""
Set the necessary data of the network
"""
self.is_x_tensor4 = False
self.n_gru_vox = 4
#the size of x is (num_views, batch_size, 3, img_w, img_h)
self.input_shape = (self.batch_size, 3, self.img_w, self.img_h)
#number of filters for each convolution layer in the encoder
self.n_convfilter = [96, 128, 256, 256, 256, 256]
#the dimension of the fully connected layer
self.n_fc_filters = [1024]
#number of filters for each 3d convolution layer in the decoder
self.n_deconvfilter = [128, 128, 128, 64, 32, 2]
#the size of the hidden state
self.h_shape = (self.batch_size, self.n_deconvfilter[0], self.n_gru_vox, self.n_gru_vox, self.n_gru_vox)
#the filter shape of the 3d convolutional gru unit
self.conv3d_filter_shape = (self.n_deconvfilter[0], self.n_deconvfilter[0], 3, 3, 3)
#set the last layer
self.SoftmaxWithLoss3D = SoftmaxWithLoss3D()
#set the encoder and the decoder of the network
self.encoder = None
self.decoder = None
def forward(self, x, y=None, test=True):
#ensure that the network has encoder and decoder attributes
if self.encoder is None:
raise Exception("subclass network of BaseGRUNet must define the \"encoder\" attribute")
if self.decoder is None:
raise Exception("subclass network of BaseGRUNet must define the \"decoder\" attribute")
#initialize the hidden state and update gate
h = self.initHidden(self.h_shape)
u = self.initHidden(self.h_shape)
#a list used to store intermediate update gate activations
u_list = []
"""
x is the input and the size of x is (num_views, batch_size, channels, heights, widths).
h and u is the hidden state and activation of last time step respectively.
The following loop computes the forward pass of the whole network.
"""
for time in range(x.size(0)):
gru_out, update_gate = self.encoder(x[time], h, u, time)
h = gru_out
u = update_gate
u_list.append(u)
out = self.decoder(h)
"""
If test is True and y is None, then the out is the [prediction].
If test is True and y is not None, then the out is [prediction, loss].
If test is False and y is not None, then the out is loss.
"""
out = self.SoftmaxWithLoss3D(out, y=y, test=test)
if test:
out.extend(u_list)
return out
def initHidden(self, h_shape):
h = torch.zeros(h_shape)
if torch.cuda.is_available():
h = h.cuda()
return Variable(h)
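# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file): the input
# layout expected by forward() as described in its docstring. The concrete
# sizes are assumptions for the example only, and a real run needs a subclass
# that defines `encoder` and `decoder` (e.g. GRUNet or ResidualGRUNet).
#   num_views, batch_size, img_w, img_h = 5, 4, 127, 127
#   x = torch.zeros(num_views, batch_size, 3, img_w, img_h)
#   # prediction = SomeGRUNetSubclass()(x, test=True)[0]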
| 35.530435 | 112 | 0.535242 |
f5bf24f049b626090bccdda9047adf97df1f18b1 | 2,539 | py | Python | update_pth.py | FIT4003StackGAN/Automated-Metamorphic-Testing-on-StackGAN-v2 | 699446e10120d7f0c43cc0a3ec224a4a2997d81c | [
"MIT"
] | null | null | null | update_pth.py | FIT4003StackGAN/Automated-Metamorphic-Testing-on-StackGAN-v2 | 699446e10120d7f0c43cc0a3ec224a4a2997d81c | [
"MIT"
] | null | null | null | update_pth.py | FIT4003StackGAN/Automated-Metamorphic-Testing-on-StackGAN-v2 | 699446e10120d7f0c43cc0a3ec224a4a2997d81c | [
"MIT"
] | null | null | null | #! /usr/bin/python
import argparse
import glob
import os
import sys
def get_contents(yml_dir):
yml_file = open(yml_dir)
lines = yml_file.readlines()
yml_file.close()
return lines
def get_pth_files(pth_dir):
pth_files = [os.path.basename(x) for x in glob.glob(pth_dir)]
pth_files.sort()
return pth_files
def init(yml_dir, pth_dir, netG):
lines = get_contents(yml_dir)
pth_files = get_pth_files(pth_dir)
default = " NET_G: '../models/birds_3stages/{}'\n"
for i in range(len(lines)):
if "/netG_" in lines[i]:
try:
init_pth = netG if netG in pth_files else pth_files[0]
lines[i] = default.format(init_pth)
yml_file = open(yml_dir, "w")
new_file_contents = "".join(lines)
yml_file.write(new_file_contents)
yml_file.close()
return init_pth[5:11]
except:
return None
def update(yml_dir, pth_dir):
lines = get_contents(yml_dir)
pth_files = get_pth_files(pth_dir)
default = " NET_G: '../models/birds_3stages/{}'\n"
for i in range(len(lines)):
if "/netG_" in lines[i]:
index = lines[i].find("netG_")
prev_pth = lines[i][index:index+15]
try:
prev_pth_i = pth_files.index(prev_pth)
except:
prev_pth_i = -1
try:
next_pth = pth_files[prev_pth_i+1]
lines[i] = default.format(next_pth)
yml_file = open(yml_dir, "w")
new_file_contents = "".join(lines)
yml_file.write(new_file_contents)
yml_file.close()
return next_pth[5:11]
except:
return None
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog="Automate Test")
parser.add_argument("-n", "--netG", dest="netG", default='netG_200000')
parser.add_argument("-m", "--mode", dest="mode", default='update')
parser.add_argument("-d", "--base_dir", dest="base_dir", default="/home/monash/Desktop/StackGAN-v2-master/code")
args = parser.parse_args()
yml_dir = args.base_dir + "/cfg/eval_birds.yml"
pth_dir = args.base_dir + "/../models/birds_3stages/*.pth"
if args.mode.lower() == "update":
print(update(yml_dir, pth_dir))
elif args.mode.lower() == "init":
print(init(yml_dir, pth_dir, args.netG))
| 29.870588 | 116 | 0.562032 |
6b5ae2169dc22cf1e6234909773c2dd39cb2d001 | 4,372 | py | Python | effunet/effunet.py | pranshu97/effunet | 71dfae6a7a2a9558b08389ea8025e9477e18b32a | [
"MIT"
] | 19 | 2020-12-17T17:01:55.000Z | 2022-02-08T10:10:05.000Z | effunet/effunet.py | pranshu97/effunet | 71dfae6a7a2a9558b08389ea8025e9477e18b32a | [
"MIT"
] | 2 | 2020-12-24T14:02:52.000Z | 2021-05-27T06:35:50.000Z | effunet/effunet.py | pranshu97/effunet | 71dfae6a7a2a9558b08389ea8025e9477e18b32a | [
"MIT"
] | 3 | 2021-01-19T06:26:24.000Z | 2021-12-06T11:33:35.000Z | import torch
import torch.nn as nn
import torchvision.transforms as T
from efficientnet_pytorch import EfficientNet
# Utility Functions for the model
def double_conv(in_,out_,drop): # Double convolution layer for decoder
conv = nn.Sequential(
nn.Conv2d(in_,out_,kernel_size=3,padding=(1,1)),
nn.ReLU(inplace=True),
nn.Conv2d(out_,out_,kernel_size=3,padding=(1,1)),
nn.ReLU(inplace=True),
nn.Dropout(drop)
)
return conv
def crop(tensor,target_tensor): # Crop tensor to target tensor size
target_shape = target_tensor.shape[2]
return T.CenterCrop(target_shape)(tensor)
# Hook functions to get values of intermediate layers for cross connection
hook_values = []
def hook(_, input, output):
global hook_values
hook_values.append(output) # stores values of each layers in hook_values
indices = []
shapes = []
def init_hook(model,device):
global shapes, indices, hook_values
for i in range(len(model._blocks)):
model._blocks[i].register_forward_hook(hook) #register hooks
image = torch.rand([1,3,576,576])
image = image.to(device)
out = model(image) # generate hook values to get shapes
shape = [i.shape for i in hook_values] # get shape of all layers
for i in range(len(shape)-1):
        if shape[i][2]!=shape[i+1][2]: # get indices of layers only where the output dimension changes
indices.append(i)
indices.append(len(shape)-1) # get last layer index
shapes = [shape[i] for i in indices] # get shapes of required layers
shapes = shapes[::-1]
encoder_out = []
def epoch_hook(model, image):
global encoder_out, indices, hook_values
hook_values = []
out = model(image) # generate layer outputs with current image
encoder_out = [hook_values[i] for i in indices] # get layer outputs for selected indices
class EffUNet(nn.Module):
def __init__(self,model='b0',out_channels=2,dropout=0.1,freeze_backbone=True,pretrained=True,device='cuda',num_gpu=1):
super(EffUNet,self).__init__()
global layers, shapes
if model not in set(['b0','b1','b2','b3','b4','b5','b6','b7']):
raise Exception(f'{model} unavailable.')
if pretrained:
self.encoder = EfficientNet.from_pretrained(f'efficientnet-{model}')
else:
self.encoder = EfficientNet.from_name(f'efficientnet-{model}')
# Disable non required layers by replacing them with identity
self.encoder._conv_head=torch.nn.Identity()
self.encoder._bn1=torch.nn.Identity()
self.encoder._avg_pooling=torch.nn.Identity()
self.encoder._dropout=torch.nn.Identity()
self.encoder._fc=torch.nn.Identity()
self.encoder._swish=torch.nn.Identity()
if isinstance(device, str):
self.device = torch.device(device)
else:
self.device = device
self.encoder.to(self.device)
self.encoder._conv_stem.stride=1 # change stride of first layer from 2 to 1 to increase o/p size
self.encoder._conv_stem.kernel_size=(1,1) #
# freeze encoder
if freeze_backbone:
for param in self.encoder.parameters():
param.requires_grad = False
# register hooks & get shapes
init_hook(self.encoder,self.device)
# Building decoder
self.decoder = torch.nn.modules.container.ModuleList()
for i in range(len(shapes)-1):
self.decoder.append(torch.nn.modules.container.ModuleList())
self.decoder[i].append(nn.ConvTranspose2d(shapes[i][1],shapes[i][1]-shapes[i+1][1],kernel_size=2,stride=2).to(self.device))
self.decoder[i].append(double_conv(shapes[i][1],shapes[i+1][1],dropout).to(self.device))
#output layer
self.out = nn.Conv2d(shapes[-1][1],out_channels,kernel_size=1).to(self.device)
# Handling multiple GPUs
if num_gpu>1 and device=='cuda':
self.encoder = nn.DataParallel(self.encoder)
def forward(self, image):
global layers
h=image.shape[2]
w=image.shape[3]
if h%8!=0 or w%8!=0:
new_h = round(h/8)*8
new_w = round(w/8)*8
image = T.Resize((new_h,new_w))(image)
# Encoder
epoch_hook(self.encoder, image) # required outputs accumulate in "encoder_out"
#Decoder
x = encoder_out.pop()
for i in range(len(self.decoder)):
x = self.decoder[i][0](x) # conv transpose
prev = encoder_out.pop()
prev = crop(prev,x) # croping for cross connection
prev = torch.cat([x,prev],axis=1) # concatenating
x = self.decoder[i][1](prev) # double conv
#out
x = self.out(x)
return x
# img = torch.rand([1,3,512,512]).cuda()
# model = EffUNet()
# # print(model)
# out = model(img)
# print(out.shape) | 31.007092 | 126 | 0.714776 |
4921416f7fd40696c1531d05ea869b4fd65d280b | 1,330 | py | Python | test/system/auto/simple/nativeMap.py | marciosilva/accumulo | 70404cbd1e0a2d2b7c2235009e158979abeef35f | [
"Apache-2.0"
] | 3 | 2021-11-11T05:18:23.000Z | 2021-11-11T05:18:43.000Z | test/system/auto/simple/nativeMap.py | jatrost/accumulo | 6be40f2f3711aaa7d0b68b5b6852b79304af3cff | [
"Apache-2.0"
] | 1 | 2021-06-22T09:52:37.000Z | 2021-06-22T09:52:37.000Z | test/system/auto/simple/nativeMap.py | jatrost/accumulo | 6be40f2f3711aaa7d0b68b5b6852b79304af3cff | [
"Apache-2.0"
] | 1 | 2021-11-09T05:32:32.000Z | 2021-11-09T05:32:32.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import time
from TestUtils import TestUtilsMixin
class NativeMapTest(TestUtilsMixin, unittest.TestCase):
"Native Map Unit Test"
order = 21
testClass=""
def setUp(self):
pass
def runTest(self):
handle = self.runClassOn('localhost', 'org.apache.accumulo.server.test.functional.NativeMapTest', [])
self.waitForStop(handle, 20)
def tearDown(self):
pass
def suite():
result = unittest.TestSuite()
result.addTest(NativeMapTest())
return result
| 30.930233 | 109 | 0.731579 |
7ea76583f30e30ff8657df52e03dbd234bf90eed | 13,125 | py | Python | main.py | Hunter-DDM/DeFT-naacl2021 | c61aeb4f63a650a0a1b71fb1b0b245cb3925009b | [
"MIT"
] | 6 | 2021-03-14T09:24:51.000Z | 2021-06-20T08:45:50.000Z | main.py | Hunter-DDM/DeFT-naacl2021 | c61aeb4f63a650a0a1b71fb1b0b245cb3925009b | [
"MIT"
] | null | null | null | main.py | Hunter-DDM/DeFT-naacl2021 | c61aeb4f63a650a0a1b71fb1b0b245cb3925009b | [
"MIT"
] | null | null | null | import argparse
import time
import math
from builtins import enumerate
import torch
import torch.nn as nn
import numpy as np
import data
import model
import translate
def load_checkpoint():
checkpoint = torch.load(args.save)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
def save_checkpoint():
check_point = {'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}
torch.save(check_point, args.save)
###############################################################################
# Training code
###############################################################################
def train():
# Turn on training mode which enables dropout.
model.train()
total_loss = 0
batch_iter = corpus.batch_iterator(corpus.train, args.batch_size, cuda=args.cuda, shuffle=True, mode='train', seed_feeding=model.seed_feeding)
batch_num = -(-len(corpus.train) // args.batch_size)
start_time = time.time()
for i, elems in enumerate(batch_iter):
optimizer.zero_grad()
hidden = model.init_hidden(len(elems[0]), model.dhid) # nlayer, batch, d_hid; nlayer, batch, d_hid;
if args.char:
if args.use_formation:
char_emb = model.get_char_embedding(elems[1], elems[7]) # batch, d_char_emb;
else:
char_emb = model.get_char_embedding(elems[1]) # batch, d_char_emb;
else:
char_emb = None
(word, # batch
chars, # batch, d_emb * 2
vec, # batch, d_emb
src, # maxlen, batch
trg, # maxlen + 1
eg, # maxlen_eg, batch
eg_mask, # maxlen_eg, batch
fms, # batch
mor1, # maxlen_mor1, batch
mor1_len, # batch, mor1
mor1_mask, # maxlen_mor1, batch
mor2, # maxlen_mor2, batch
mor2_len, # batch, mor2
mor2_mask, # maxlen_mor2, batch
sm_vecs # batch, d_emb
) = elems
eg_emb = model.get_nn_embedding(eg) # maxlen_eg, batch, 2 * d_hid;
Wh_enc = model.attention.map_enc_states(eg_emb) # batch, maxlen_eg, d_hid;
output, hidden = model(src, hidden, vec, eg_emb, eg_mask, Wh_enc, fms, mor1, mor1_len, mor1_mask, mor2, mor2_len, mor2_mask, sm_vecs, args.cuda, seed_feeding=model.seed_feeding, char_emb=char_emb, teacher_ratio=args.teacher_ratio) # maxlen + 1, batch, vocab
loss = criterion(output.view(output.size(0) * output.size(1), -1), trg) # (maxlen + 1) * batch, vocab; (maxlen + 1) * batch
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
total_loss += loss.data
if (i+1) % args.log_interval == 0:
elapsed = time.time() - start_time
cur_loss = total_loss.item() / args.log_interval
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.07f} | ms/batch {:5.2f} | loss {:5.2f} | ppl {:8.2f}'.format(
epoch, i+1, batch_num, lr, elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)),
flush=True
)
total_loss = 0
start_time = time.time()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Morpheme')
parser.add_argument('--data', type=str, default='./data/noraset',
help='location of the data corpus')
parser.add_argument('--data_usage', type=int, default=100,
help='how many train data to be used (0 - 100 [%])')
parser.add_argument('--vocab_size', type=int, default=-1,
help='vocabulary size (-1 = all vocabl)')
parser.add_argument('--vec', type=str, default='./data/GoogleNews-vectors-negative300.txt',
help='location of the word2vec data')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--emsize', type=int, default=300,
help='size of word embeddings')
parser.add_argument('--init_dist', type=str, default='uniform',
help='distribution to be used to initialize (uniform, xavier)')
parser.add_argument('--init_range', type=float, default=0.05,
help='initialize parameters using uniform distribution between -uniform and uniform.')
parser.add_argument('--init_embedding', action='store_true',
help='initialize word embeddings with read vectors from word2vec')
parser.add_argument('--seed_feeding', action='store_true',
help='feed seed embedding at the first step of decoding')
parser.add_argument('--use_eg', action='store_true',
help='whether to use example information')
parser.add_argument('--simple_eg', action='store_true',
help='whether to use rule-based simple example, for analysis')
parser.add_argument('--use_wordvec', action='store_true',
help='whether to use wordvec information')
parser.add_argument('--use_morpheme', action='store_true',
help='whether to use morpheme information')
parser.add_argument('--use_formation', action='store_true',
help='whether to use formation information')
parser.add_argument('--use_sememe', action='store_true',
help='whether to use sememe information')
parser.add_argument('--teacher_ratio', type=float, default=1.0,
help='teacher forcing ratio')
parser.add_argument('--char', action='store_true',
help='character embedding')
parser.add_argument('--fix_embedding', action='store_true',
help='fix initialized word embeddings')
parser.add_argument('--dhid', type=int, default=300,
help='dimension of hidden states')
parser.add_argument('--nlayers', type=int, default=2,
help='number of layers')
parser.add_argument('--lr', type=float, default=0.001,
help='initial learning rate')
parser.add_argument('--lr_decay', type=float, default=0.5,
help='factor by which learning rate is decayed (lr = lr * factor)')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=500,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
help='batch size')
parser.add_argument('--emb_dropout', type=float, default=0.2,
help='dropout applied to embedding layer (0 = no dropout)')
parser.add_argument('--dropout', type=float, default=0.5,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--log_interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='model.pt',
help='path to save the final model')
parser.add_argument('--opt', type=str, default='adam',
help='optimizer (adam, sgd)')
parser.add_argument('--sentence_bleu', type=str, default='./sentence-bleu',
help='Compiled binary file of sentece-bleu.cpp')
parser.add_argument('--valid_all', action='store_true',
help='Run validation with all data (only for debugging)')
args = parser.parse_args()
print(args)
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
corpus = data.MorphemeCorpus(args)
if args.data_usage < 100.0:
corpus.sample_train_data(args.data_usage)
translator = translate.Translator(corpus, sentence_bleu=args.sentence_bleu, valid_all=args.valid_all)
eval_batch_size = 10
###############################################################################
# Build the model
###############################################################################
vocab_size = len(corpus.id2word)
model = model.MORPHEME_EXT(args, corpus)
if args.init_embedding == True:
model.init_embedding(corpus, fix_embedding=args.fix_embedding)
if args.cuda:
model.cuda()
    criterion = nn.CrossEntropyLoss(ignore_index=corpus.word2id['<pad>'])  # by default ignore_index is -100 (the token generated at the seed position); changed to use <pad> as the ignore index
if args.opt == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
elif args.opt == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
# Loop over epochs.
lr = args.lr
best_val_loss = 99999999
best_bleu = -1
no_improvement = 0
# At any point you can hit Ctrl + C to break out of training early.
try:
for epoch in range(1, args.epochs + 1):
epoch_start_time = time.time()
train()
[val_loss] = translator.eval_loss(model, mode='valid', max_batch_size=args.batch_size, cuda=args.cuda)
hyp = translator.greedy(model, mode="valid", max_batch_size=args.batch_size, cuda=args.cuda, max_len=corpus.max_len)
val_bleu_corpus = translator.bleu(hyp, mode="valid", nltk='corpus')
val_bleu_sentence = translator.bleu(hyp, mode="valid", nltk='sentence')
# debug: evaluate training set
hyp_train = translator.greedy(model, mode="train", max_batch_size=args.batch_size, cuda=args.cuda, max_len=corpus.max_len)
val_bleu_corpus_train = translator.bleu(hyp_train, mode="train", nltk='corpus')
val_bleu_sentence_train = translator.bleu(hyp_train, mode="train", nltk='sentence')
print('train_bleu(corpus/sentence): ({:5.2f}/{:5.2f})'.format(val_bleu_corpus_train * 100, val_bleu_sentence_train * 100))
if val_loss < best_val_loss:
save_checkpoint()
best_val_loss = val_loss
best_bleu = val_bleu_sentence # we are interested in the best bleu after ppl stop decreasing
no_improvement = 0
elif val_bleu_sentence > best_bleu:
save_checkpoint()
best_bleu = val_bleu_sentence
no_improvement = 0
else:
no_improvement += 1
if no_improvement == 6:
load_checkpoint()
lr *= args.lr_decay
for param_group in optimizer.param_groups:
param_group['lr'] *= args.lr_decay
if no_improvement == 12:
break
print('-' * 112)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | ppl {:8.2f} | BLEU(C/S) {:5.2f} /{:5.2f} | not improved: {:d}'.format(
epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss), val_bleu_corpus * 100, val_bleu_sentence * 100, no_improvement),
flush=True
)
print('-' * 112)
except KeyboardInterrupt:
print('-' * 112)
print('Exiting from training early')
# Load the best saved model.
load_checkpoint()
# Run on test data.
hyp = translator.greedy(model, mode="test", max_batch_size=args.batch_size, cuda=args.cuda, max_len=corpus.max_len)
print('-' * 112)
print('Decoded:')
for (word, desc) in hyp:
print(word, end='\t')
new_def = []
for w in desc:
if w not in corpus.id2word:
new_def.append('[' + w + ']')
else:
new_def.append(w)
print(' '.join(new_def), flush=True)
test_loss = translator.eval_loss(model, mode='test', max_batch_size=args.batch_size, cuda=args.cuda)
test_bleu_corpus = translator.bleu(hyp, mode="test", nltk='corpus')
test_bleu_sentence = translator.bleu(hyp, mode="test", nltk='sentence')
print('=' * 112)
print('| End of training | test BLEU (corpus.nltk / sent.nltk): {:5.2f}/{:5.2f}'.format(test_bleu_corpus * 100, test_bleu_sentence * 100))
print('| End of training | best_valid BLEU (sent.nltk): {:5.2f}'.format(best_bleu * 100))
print('=' * 112)
| 47.21223 | 266 | 0.58141 |
8291fb09a2ab2b555d729bc963eb086ac10ad582 | 1,648 | py | Python | napari_allencell_segmenter/core/router.py | neuromusic/napari-allencell-segmenter | c732408023c828c07ec2a425f4f426174d94946b | [
"BSD-3-Clause"
] | 8 | 2021-06-29T09:24:22.000Z | 2022-03-22T23:43:10.000Z | napari_allencell_segmenter/core/router.py | neuromusic/napari-allencell-segmenter | c732408023c828c07ec2a425f4f426174d94946b | [
"BSD-3-Clause"
] | 97 | 2021-02-18T02:39:31.000Z | 2021-06-18T21:38:41.000Z | napari_allencell_segmenter/core/router.py | neuromusic/napari-allencell-segmenter | c732408023c828c07ec2a425f4f426174d94946b | [
"BSD-3-Clause"
] | 2 | 2021-09-14T22:07:22.000Z | 2022-02-07T16:41:02.000Z | from aicssegmentation.workflow import WorkflowEngine
from napari_allencell_segmenter.controller.workflow_select_controller import WorkflowSelectController
from napari_allencell_segmenter.controller.workflow_steps_controller import WorkflowStepsController
from napari_allencell_segmenter.controller.batch_processing_controller import BatchProcessingController
from napari_allencell_segmenter.core.layer_reader import LayerReader
from napari_allencell_segmenter.core.controller import Controller
from ._interfaces import IApplication, IRouter
class Router(IRouter):
_controller = None
def __init__(self, application: IApplication):
if application is None:
raise ValueError("application")
self._application = application
# TODO do some proper dependency injection in the future if the project grows
self._layer_reader = LayerReader()
self._workflow_engine = WorkflowEngine()
def workflow_selection(self):
controller = WorkflowSelectController(self._application, self._layer_reader, self._workflow_engine)
self._handle_navigation(controller)
def workflow_steps(self):
controller = WorkflowStepsController(self._application, self._workflow_engine)
self._handle_navigation(controller)
def batch_processing(self):
controller = BatchProcessingController(self._application, self._workflow_engine)
self._handle_navigation(controller)
def _handle_navigation(self, controller: Controller):
if self._controller:
self._controller.cleanup()
self._controller = controller
self._controller.index()
| 43.368421 | 107 | 0.781553 |
511ed3f95569961b3a0e0c438fd919f3b634432d | 860 | py | Python | csrank/tests/test_util.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
] | null | null | null | csrank/tests/test_util.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
] | null | null | null | csrank/tests/test_util.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
] | 1 | 2018-10-30T08:57:14.000Z | 2018-10-30T08:57:14.000Z | import pytest
import numpy as np
import tensorflow as tf
from keras import backend as K
from ..dataset_reader.util import SyntheticIterator
from ..util import tensorify, check_ranker_class
def test_tensorify():
a = np.array([1., 2.])
out = tensorify(a)
assert isinstance(out, tf.Tensor)
b = K.zeros((5, 3))
out = tensorify(b)
assert b == out
def test_synthetic_iterator():
def func(a, b):
return (b, a)
it = SyntheticIterator(dataset_function=func,
a=41, b=2)
for i, (x, y) in enumerate(it):
if i == 1:
break
assert x == 2
assert y == 41
def test_check_ranker_class():
class MockClass(object):
def __init__(self):
pass
ranker = MockClass()
with pytest.raises(AttributeError):
check_ranker_class(ranker)
| 20.97561 | 51 | 0.60814 |
13a2e2ab9e3c020a8c3453084771ed2d53087c61 | 506 | py | Python | src/main/java/finished/dp/finished/No87_扰乱字符串/87.py | fortbox/leetcode-solve | 3e9d33fc2d1936ae028fe4b3925ab27e2098acb8 | [
"Apache-2.0"
] | null | null | null | src/main/java/finished/dp/finished/No87_扰乱字符串/87.py | fortbox/leetcode-solve | 3e9d33fc2d1936ae028fe4b3925ab27e2098acb8 | [
"Apache-2.0"
] | null | null | null | src/main/java/finished/dp/finished/No87_扰乱字符串/87.py | fortbox/leetcode-solve | 3e9d33fc2d1936ae028fe4b3925ab27e2098acb8 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019
# @Author: xiaoweixiang
class Solution:
def isScramble(self, s1: str, s2: str) -> bool:
if s1 == s2:
return True
if sorted(s1) != sorted(s2):
return False
for i in range(1, len(s1)):
if self.isScramble(s1[:i], s2[:i]) and self.isScramble(s1[i:], s2[i:]):
return True
if self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:], s2[:-i]):
return True
return False
| 31.625 | 85 | 0.511858 |
b3b82b5440c2e31f8283634cce405af8fa49a21d | 4,259 | py | Python | samples/balloon/dataset/BaloonDataset.py | Trevol/Mask_RCNN | 18308082e2c5fd5b4df5d6e40f009b3ebd66c26d | [
"MIT"
] | null | null | null | samples/balloon/dataset/BaloonDataset.py | Trevol/Mask_RCNN | 18308082e2c5fd5b4df5d6e40f009b3ebd66c26d | [
"MIT"
] | null | null | null | samples/balloon/dataset/BaloonDataset.py | Trevol/Mask_RCNN | 18308082e2c5fd5b4df5d6e40f009b3ebd66c26d | [
"MIT"
] | null | null | null | import os, numpy as np, json, skimage
from mrcnn import utils
class BalloonDataset(utils.Dataset):
def load_balloon(self, dataset_dir, subset):
"""Load a subset of the Balloon dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
# Add classes. We have only one class to add.
self.add_class("balloon", 1, "balloon")
# Train or validation dataset?
assert subset in ["train", "val"]
dataset_dir = os.path.join(dataset_dir, subset)
# Load annotations
# VGG Image Annotator (up to version 1.6) saves each image in the form:
# { 'filename': '28503151_5b5b7ec140_b.jpg',
# 'regions': {
# '0': {
# 'region_attributes': {},
# 'shape_attributes': {
# 'all_points_x': [...],
# 'all_points_y': [...],
# 'name': 'polygon'}},
# ... more regions ...
# },
# 'size': 100202
# }
# We mostly care about the x and y coordinates of each region
# Note: In VIA 2.0, regions was changed from a dict to a list.
annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
annotations = list(annotations.values()) # don't need the dict keys
# The VIA tool saves images in the JSON even if they don't have any
# annotations. Skip unannotated images.
annotations = [a for a in annotations if a['regions']]
# Add images
for a in annotations:
            # Get the x, y coordinates of points of the polygons that make up
            # the outline of each object instance. These are stored in the
# shape_attributes (see json format above)
# The if condition is needed to support VIA versions 1.x and 2.x.
if type(a['regions']) is dict:
polygons = [r['shape_attributes'] for r in a['regions'].values()]
else:
polygons = [r['shape_attributes'] for r in a['regions']]
# load_mask() needs the image size to convert polygons to masks.
# Unfortunately, VIA doesn't include it in JSON, so we must read
            # the image. This is only manageable since the dataset is tiny.
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"balloon",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=polygons)
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a balloon dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "balloon":
return super(self.__class__, self).load_mask(image_id)
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
for i, p in enumerate(info["polygons"]):
# Get indexes of pixels inside the polygon and set them to 1
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
mask[rr, cc, i] = 1
# Return mask, and array of class IDs of each instance. Since we have
# one class ID only, we return an array of 1s
return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "balloon":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
| 42.168317 | 88 | 0.574313 |
48f7e0ccb02c07604d49bfd4e633b44aa4f06374 | 276 | py | Python | custom_laboratory/custom_laboratory/doctype/vaccination_intervals_table/vaccination_intervals_table.py | panhavad/custom_laboratory | a86d24bd955dc078ded044e714955cdf0c257176 | [
"MIT"
] | null | null | null | custom_laboratory/custom_laboratory/doctype/vaccination_intervals_table/vaccination_intervals_table.py | panhavad/custom_laboratory | a86d24bd955dc078ded044e714955cdf0c257176 | [
"MIT"
] | 1 | 2021-01-12T08:27:54.000Z | 2021-01-12T08:27:54.000Z | custom_laboratory/custom_laboratory/doctype/vaccination_intervals_table/vaccination_intervals_table.py | panhavad/custom_laboratory | a86d24bd955dc078ded044e714955cdf0c257176 | [
"MIT"
] | 1 | 2021-01-12T08:34:12.000Z | 2021-01-12T08:34:12.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Duk Panhavad and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class VaccinationIntervalsTable(Document):
pass
| 25.090909 | 51 | 0.786232 |
f9e16f5facb6a70358b9fa0a024382c2dd4cae1b | 8,287 | py | Python | pytests/CCCP.py | mhocouchbase/testrunner | 10faf6955a905dee9a254daf90352881d4687735 | [
"Apache-2.0"
] | null | null | null | pytests/CCCP.py | mhocouchbase/testrunner | 10faf6955a905dee9a254daf90352881d4687735 | [
"Apache-2.0"
] | null | null | null | pytests/CCCP.py | mhocouchbase/testrunner | 10faf6955a905dee9a254daf90352881d4687735 | [
"Apache-2.0"
] | null | null | null | import json
from memcached.helper.data_helper import MemcachedClientHelper
from membase.api.rest_client import RestConnection
from basetestcase import BaseTestCase
from couchbase_helper.document import View
from couchbase_helper.documentgenerator import BlobGenerator
from remote.remote_util import RemoteMachineShellConnection
from testconstants import COUCHBASE_FROM_VERSION_4, COUCHBASE_FROM_MAD_HATTER
class CCCP(BaseTestCase):
def setUp(self):
super(CCCP, self).setUp()
self.map_fn = 'function (doc){emit([doc.join_yr, doc.join_mo],doc.name);}'
self.ddoc_name = "cccp_ddoc"
self.view_name = "cccp_view"
self.default_view = View(self.view_name, self.map_fn, None, False)
self.ops = self.input.param("ops", None)
self.clients = {}
try:
for bucket in self.buckets:
self.clients[bucket.name] =\
MemcachedClientHelper.direct_client(self.master, bucket.name)
except:
self.tearDown()
def tearDown(self):
super(CCCP, self).tearDown()
def test_get_config_client(self):
tasks = self.run_ops()
for task in tasks:
if self.ops != 'failover':
task.result()
for bucket in self.buckets:
_, _, config = self.clients[bucket.name].get_config()
self.verify_config(json.loads(config), bucket)
def test_get_config_rest(self):
tasks = self.run_ops()
for task in tasks:
if not task:
self.fail("no task to run")
if self.ops == 'failover':
if not task:
self.fail("Ops failover failed ")
else:
task.result()
for bucket in self.buckets:
config = RestConnection(self.master).get_bucket_CCCP(bucket)
self.verify_config(config, bucket)
def test_set_config(self):
""" Negative test for setting bucket config. """
tasks = self.run_ops()
config_expected = 'abcabc'
for task in tasks:
task.result()
for bucket in self.buckets:
try:
self.clients[bucket.name].set_config(config_expected)
_, _, config = self.clients[bucket.name].get_config()
                if config_expected == config:
                    self.fail("It should not be allowed to set a config in this format")
except Exception as e:
                if e and "Memcached error #4 'Invalid'" not in str(e):
self.fail("ns server should not allow to set this config format")
def test_not_my_vbucket_config(self):
self.gen_load = BlobGenerator('cccp', 'cccp-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, self.gen_load, "create", 0)
self.cluster.rebalance(self.servers[:self.nodes_init],
self.servers[self.nodes_init:self.nodes_init + 1], [])
self.nodes_init = self.nodes_init + 1
not_my_vbucket = False
for bucket in self.buckets:
while self.gen_load.has_next() and not not_my_vbucket:
key, _ = next(self.gen_load)
try:
self.clients[bucket.name].get(key)
except Exception as ex:
self.log.info("Config in exception is correct. Bucket %s, key %s"\
% (bucket.name, key))
config = str(ex)[str(ex).find("Not my vbucket':") \
+ 16 : str(ex).find("for vbucket")]
if not config.endswith("}"):
config += "}"
if config.strip().startswith("b'{"):
config = config.replace("b'{", "{")
try:
config = json.loads(config)
except Exception as e:
if "Expecting object" in str(e):
config += "}"
config = json.loads(config)
self.verify_config(config, bucket)
""" from watson, only the first error contains bucket details """
not_my_vbucket = True
def verify_config(self, config_json, bucket):
expected_params = ["nodeLocator", "rev", "uuid", "bucketCapabilitiesVer",
"bucketCapabilities"]
for param in expected_params:
self.assertTrue(param in config_json, "No %s in config" % param)
self.assertTrue("name" in config_json and config_json["name"] == bucket.name,
"No bucket name in config")
if self.cb_version[:5] in COUCHBASE_FROM_VERSION_4:
self.assertTrue(len(config_json["nodesExt"]) == self.nodes_init,
"Number of nodes expected %s, actual %s" % (
self.nodes_init, len(config_json["nodesExt"])))
else:
self.assertTrue(len(config_json["nodes"]) == self.nodes_init,
"Number of nodes expected %s, actual %s" % (
self.nodes_init, len(config_json["nodes"])))
for node in config_json["nodes"]:
self.assertTrue("couchApiBase" in node and "hostname" in node,
"No hostname name in config")
if self.cb_version[:5] in COUCHBASE_FROM_MAD_HATTER:
self.assertTrue(node["ports"]["direct"] == 11210,
"ports are incorrect: %s" % node)
else:
self.assertTrue(node["ports"]["proxy"] == 11211 and \
node["ports"]["direct"] == 11210,
"ports are incorrect: %s" % node)
self.assertTrue(config_json["ddocs"]["uri"] == \
("/pools/default/buckets/%s/ddocs" % bucket.name),
"Ddocs uri is incorrect: %s "
% "/pools/default/buckets/default/ddocs")
self.assertTrue(config_json["vBucketServerMap"]["numReplicas"] == self.num_replicas,
"Num replicas is incorrect: %s "
% config_json["vBucketServerMap"]["numReplicas"])
for param in ["hashAlgorithm", "serverList", "vBucketMap"]:
self.assertTrue(param in config_json["vBucketServerMap"],
"%s in vBucketServerMap" % param)
self.log.info("Bucket %s .Config was checked" % bucket.name)
def run_ops(self):
tasks = []
if not self.ops:
return tasks
if self.ops == 'rebalance_in':
tasks.append(self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.servers[self.nodes_init:self.nodes_init + self.nodes_in], []))
self.nodes_init += self.nodes_in
elif self.ops == 'rebalance_out':
tasks.append(self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], self.servers[(self.nodes_init - self.nodes_out):self.nodes_init]))
self.nodes_init -= self.nodes_out
elif self.ops == 'failover':
tasks.append(self.cluster.failover(self.servers[:self.nodes_init],
self.servers[(self.nodes_init - self.nodes_out):self.nodes_init]))
self.sleep(20)
self.nodes_init -= self.nodes_out
if self.ops == 'create_views':
views_num = 10
views = self.make_default_views(self.view_name, views_num, different_map=True)
tasks.extend(self.async_create_views(self.master, self.ddoc_name, views))
if self.ops == 'restart':
servers_to_choose = [serv for serv in self.servers if self.master.ip != serv.ip]
self.assertTrue(servers_to_choose, "There is only one node in cluster")
shell = RemoteMachineShellConnection(servers_to_choose[0])
try:
shell.stop_couchbase()
shell.start_couchbase()
finally:
shell.disconnect()
self.sleep(5, "Server %s is starting..." % servers_to_choose[0].ip)
return tasks
| 48.461988 | 92 | 0.554845 |
2a646f38047ed355ba4f9488759d096cd8ec3b8a | 5,699 | py | Python | video_demo.py | PPGod95/FIDTM | b5582c5cc485496d85af2043ffd6e4266f354f3b | [
"MIT"
] | null | null | null | video_demo.py | PPGod95/FIDTM | b5582c5cc485496d85af2043ffd6e4266f354f3b | [
"MIT"
] | null | null | null | video_demo.py | PPGod95/FIDTM | b5582c5cc485496d85af2043ffd6e4266f354f3b | [
"MIT"
] | null | null | null | from __future__ import division
import warnings
from Networks.HR_Net.seg_hrnet import get_seg_model
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
import dataset
import math
from image import *
from utils import *
import logging
import nni
from nni.utils import merge_parameter
from config import return_args, args
warnings.filterwarnings('ignore')
import time
logger = logging.getLogger('mnist_AutoML')
print(args)
img_transform = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
tensor_transform = transforms.ToTensor()
def main(args):
model = get_seg_model()
model = nn.DataParallel(model, device_ids=[0])
model = model.cuda()
if args['pre']:
if os.path.isfile(args['pre']):
print("=> loading checkpoint '{}'".format(args['pre']))
checkpoint = torch.load(args['pre'])
model.load_state_dict(checkpoint['state_dict'], strict=False)
args['start_epoch'] = checkpoint['epoch']
args['best_pred'] = checkpoint['best_prec1']
else:
print("=> no checkpoint found at '{}'".format(args['pre']))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
#fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
cap = cv2.VideoCapture(args['video_path'])
ret, frame = cap.read()
print(frame.shape)
'''out video'''
width = frame.shape[1] #output size
height = frame.shape[0] #output size
out = cv2.VideoWriter('./demo.avi', fourcc, 30, (width, height))
while True:
try:
ret, frame = cap.read()
scale_factor = 0.5
frame = cv2.resize(frame, (0, 0), fx=scale_factor, fy=scale_factor)
ori_img = frame.copy()
except:
print("test end")
cap.release()
break
frame = frame.copy()
image = tensor_transform(frame)
image = img_transform(image).unsqueeze(0)
with torch.no_grad():
d6 = model(image)
count, pred_kpoint = counting(d6)
point_map = generate_point_map(pred_kpoint)
box_img = generate_bounding_boxes(pred_kpoint, frame)
show_fidt = show_fidt_func(d6.data.cpu().numpy())
#res = np.hstack((ori_img, show_fidt, point_map, box_img))
res1 = np.hstack((ori_img, show_fidt))
res2 = np.hstack((box_img, point_map))
res = np.vstack((res1, res2))
cv2.putText(res, "Count:" + str(count), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
cv2.imwrite('./demo.jpg', res)
'''write in out_video'''
out.write(res)
print("pred:%.3f" % count)
def counting(input):
input_max = torch.max(input).item()
keep = nn.functional.max_pool2d(input, (3, 3), stride=1, padding=1)
keep = (keep == input).float()
input = keep * input
input[input < 100.0 / 255.0 * torch.max(input)] = 0
input[input > 0] = 1
'''negative sample'''
if input_max<0.1:
input = input * 0
count = int(torch.sum(input).item())
kpoint = input.data.squeeze(0).squeeze(0).cpu().numpy()
return count, kpoint
def generate_point_map(kpoint):
rate = 1
pred_coor = np.nonzero(kpoint)
point_map = np.zeros((int(kpoint.shape[0] * rate), int(kpoint.shape[1] * rate), 3), dtype="uint8") + 255 # 22
# count = len(pred_coor[0])
coord_list = []
for i in range(0, len(pred_coor[0])):
h = int(pred_coor[0][i] * rate)
w = int(pred_coor[1][i] * rate)
coord_list.append([w, h])
cv2.circle(point_map, (w, h), 3, (0, 0, 0), -1)
return point_map
def generate_bounding_boxes(kpoint, Img_data):
'''generate sigma'''
pts = np.array(list(zip(np.nonzero(kpoint)[1], np.nonzero(kpoint)[0])))
leafsize = 2048
if pts.shape[0] > 0: # Check if there is a human presents in the frame
# build kdtree
tree = scipy.spatial.KDTree(pts.copy(), leafsize=leafsize)
distances, locations = tree.query(pts, k=4)
for index, pt in enumerate(pts):
pt2d = np.zeros(kpoint.shape, dtype=np.float32)
pt2d[pt[1], pt[0]] = 1.
if np.sum(kpoint) > 1:
sigma = (distances[index][1] + distances[index][2] + distances[index][3]) * 0.1
else:
sigma = np.average(np.array(kpoint.shape)) / 2. / 2. # case: 1 point
sigma = min(sigma, min(Img_data.shape[0], Img_data.shape[1]) * 0.04)
if sigma < 6:
t = 2
else:
t = 2
Img_data = cv2.rectangle(Img_data, (int(pt[0] - sigma), int(pt[1] - sigma)),
(int(pt[0] + sigma), int(pt[1] + sigma)), (0, 255, 0), t)
return Img_data
def show_fidt_func(input):
input[input < 0] = 0
input = input[0][0]
fidt_map1 = input
fidt_map1 = fidt_map1 / np.max(fidt_map1) * 255
fidt_map1 = fidt_map1.astype(np.uint8)
fidt_map1 = cv2.applyColorMap(fidt_map1, 2)
return fidt_map1
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == '__main__':
tuner_params = nni.get_next_parameter()
logger.debug(tuner_params)
params = vars(merge_parameter(return_args, tuner_params))
print(params)
main(params)
| 29.528497 | 114 | 0.586945 |
8583d2cb3bdd725c34b2ba7e6a2037dcbde91c50 | 19,469 | py | Python | Resources/Docs/Slicer useful snippets.py | ihnorton/SlicerCIP | a90ece2991b074c755b1243894b67a63d3a19419 | [
"BSD-3-Clause"
] | null | null | null | Resources/Docs/Slicer useful snippets.py | ihnorton/SlicerCIP | a90ece2991b074c755b1243894b67a63d3a19419 | [
"BSD-3-Clause"
] | null | null | null | Resources/Docs/Slicer useful snippets.py | ihnorton/SlicerCIP | a90ece2991b074c755b1243894b67a63d3a19419 | [
"BSD-3-Clause"
] | null | null | null | MAIN CLASSES:
- Slicer/Base/Python/slicer/util.py ==> very useful class to load volumes and other common operations (import as slicer.util)
- vtk.util ==> convert vtk in numpy and viceversa (see also CIP_/CIP/Util)
- ScriptedLoadableModuleTemplate ==> class that contains the default template for a new Python module
- slicer.modules.volumes.logic() ==> handle volumes (events, etc.)
- slicer.vtkSlicerVolumesLogic() ==> more volumes handeling
- slicer.app.applicationLogic() ==> important applications general to Slicer
- slicer.app.layoutManager() ==> general layout operations (ex: select a different module?)
- slicer.modules.volumerendering.logic() ==> many utilities for 3D rendering
- SlicerCIP/CIP_/CIP/SlicerUtil ==> some common utils for Slicer (expected to be extended in the future)
- SlicerCIP/CIP_/CIP/Util ==> common utils, especially for volume handling: VTK<=>numpy, save nodes
Some useful snippets: http://wiki.slicer.org/slicerWiki/index.php/Documentation/Nightly/Developers/Python_scripting
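A minimal sketch tying a few of these together (illustrative only; it assumes a volume named "myVolume" is already loaded in the scene):
import slicer
node = slicer.util.getNode("myVolume")                      # slicer.util ==> common operations
arr = slicer.util.array(node.GetName())                     # numpy array bound to the node
volumesLogic = slicer.modules.volumes.logic()               # volume handling (events, etc.)
renderingLogic = slicer.modules.volumerendering.logic()     # 3D rendering utilities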
####################################################################################
- All the scripted modules inherit from Base/QTGUI/qSlicerScriptedLoadableModule.
- Every scripted module should have:
- Class named like the module
- Class_widget ==> gui components
- Class_logic ==> class logic (optional). It shouldn't use any components from the gui
- Class_tests ==> testing class (optional). Tests that could be run from Slicer in runtime.
- enter ==> event when entering the module (in the widget)
- exit ==> event when exiting the module (in the widget)
- cleanup ==> free resources, stop listening to events, etc.
####################################################################################
Node types:
- vtkMRMLScalarVolumeNode: "raw" data
- vtkImageData: "geometrical information" associated with the rendering. To get it: scalarNode.GetImageData()
- slicer.util.loadVolume(path) ==> load a regular volume
- slicer.util.loadLabelVolume(path) ==> load a labelmap volume
- slicer.util.saveNode(node, path) ==> save a node
# Is the volume a labelmap?
- node.GetAttribute("LabelMap") == '1'
Note: the "regular" nodes (both volumes and labelmaps) are of type vtkMRMLScalarVolumeNode
- node = slicer.util.getNode(nodeName) ==> get a node loaded in the scene with its name
- node = slicer.mrmlScene.GetNodeByID(id) ==> get a node loaded in the scene with its internal id
- nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLScalarVolumeNode') ==> get all the nodes of type vtkMRMLScalarVolumeNode
- to iterate over the nodes:
nodes.InitTraversal()
n = nodes.GetNextItemAsObject() or n = nodes.GetItemAsObject(0) (this would return an object of type vtkMRMLScalarVolumeNode)
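  e.g. a complete traversal (sketch):
    nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLScalarVolumeNode')
    nodes.InitTraversal()
    n = nodes.GetNextItemAsObject()
    while n:
        print(n.GetName())
        n = nodes.GetNextItemAsObject()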
####################################################################################
NUMPY / VTK
- slicer.util.array(node.GetName()) ==> create a numpy array that is bound to the node (the changes in the array will can be updated in the node )
- to refresh the changes in the user interface:
- node.GetImageData().Modified()
- SlicerUtil.refreshActiveWindows() # custom method that repaints all the visible windows
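  e.g. a small end-to-end sketch (assumes a node named "myVolume" is loaded; the threshold is just an arbitrary edit):
    node = slicer.util.getNode("myVolume")
    a = slicer.util.array(node.GetName())
    a[a < -1000] = -1000               # edit the voxels through the bound numpy array
    node.GetImageData().Modified()     # refresh so the changes show up in the user interface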
####################################################################################
# General mechanism to listen to the events in a node or in the Slicer scene
- slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.NodeAddedEvent, self.onNodeAdded) ==> listen to events in the scene. This does NOT return the added node (see "Observe the scene to capture a volume when it is added ")
####################################################################################
# Create a new volume from scratch (adding it to the scene automatically)
node = slicer.mrmlScene.CreateNodeByClass("vtkMRMLDisplayNode")
slicer.mrmlScene.AddNode(node)
# or
#node = slicer.vtkSlicerVolumesLogic().
# Get node by name (also valid with id)
n = slicer.util.getNode("12257B_INSP_STD_UIA_COPD")
####################################################################################
# Clone a volume
vl = slicer.modules.volumes.logic()
newvol = vl.CloneVolume(slicer.mrmlScene, node, "myNode")
####################################################################################
# NODE SELECTOR (FILTERED BY LABEL MAPS)
self.volumeSelector = slicer.qMRMLNodeComboBox()
self.volumeSelector.nodeTypes = ( "vtkMRMLScalarVolumeNode", "" )
#self.volumeSelector.addAttribute( "vtkMRMLScalarVolumeNode", "LabelMap", "1" ) deprecated. use new vtkMRMLLabelMapVolumeNode type
self.volumeSelector.selectNodeUponCreation = False
self.volumeSelector.addEnabled = False
self.volumeSelector.noneEnabled = False
self.volumeSelector.removeEnabled = False
self.volumeSelector.showHidden = False
self.volumeSelector.showChildNodeTypes = False
self.volumeSelector.setMRMLScene( slicer.mrmlScene )
self.volumeSelector.setToolTip( "Pick the label map to edit" )
self.volumeSelectorFrame.layout().addWidget( self.volumeSelector )
....
self.volumeSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.onVolumeSelected)
...
selectedNode = self.volumeSelector.currentNode()
self.volumeSelector.setCurrentNode(node) or setCurrentNodeID(volumeId)
####################################################################################
# Select a volume node and a label map as the active nodes in Slicer through a selectionNode. Required for example for the Editor
selectionNode = slicer.app.applicationLogic().GetSelectionNode()
selectionNode.SetReferenceActiveVolumeID( self.master.GetID() )
selectionNode.SetReferenceActiveLabelVolumeID( merge.GetID() )
slicer.app.applicationLogic().PropagateVolumeSelection(0)
# IMPORTANT: the layer is the type of node (background, foreground, labelmap). We can use a particular method like appLogic.PropagateForegroundVolumeSelection()
NOTE: selectionNode can be used not only for volumes, but also for fiducials, ROIs, etc.
####################################################################################
# use the Red slice composite node to define the active volumes """
count = slicer.mrmlScene.GetNumberOfNodesByClass('vtkMRMLSliceCompositeNode')
for n in xrange(count):
compNode = slicer.mrmlScene.GetNthNodeByClass(n, 'vtkMRMLSliceCompositeNode')
if compNode.GetLayoutName() == layoutName:
return compNode
Note: vtkMRMLSliceCompositeNode has:
- BackgroundVolumeID: vtkMRMLScalarVolumeNode2
- ForegroundVolumeID: (none)
- LabelVolumeID: vtkMRMLScalarVolumeNode4
########################################################################
# Clone a volume.
- Direct (will keep the same name and is added directly to the scene)
slicer.mrmlScene.CopyNode(labelMapNode)
- "Manual":
logic = slicer.vtkSlicerVolumesLogic()
labelMapNode = slicer.util.getNode("10270J_INSP_STD_JHU_COPD_bodyComposition")
labelMapCopyNode = logic.CloneVolume(labelMapNode, "Copy_10270J_INSP_STD_JHU_COPD_bodyComposition")
########################################################################
# Invoke a Python sentence from C++:
slicer.app.pythonManager().executeString("slicer.util.openAddDICOMDialog()")
########################################################################
# Observe the scene to capture a volume when it is added
from functools import partial
def onNodeAdded(self, caller, eventId, callData):
"""Node added to the Slicer scene"""
if callData.GetClassName() == 'vtkMRMLAnnotationSnapshotNode': # (Generally vtkMRMLScalarVolumeNode)
self.__onNodeAdded__(callData)
self.onNodeAdded = partial(onNodeAdded, self)
self.onNodeAdded.CallDataType = vtk.VTK_OBJECT
#####################################################
# Capture the scene closed event
slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.EndCloseEvent, self.__onSceneClosed__)
...
def __onSceneClosed__(self, arg1, arg2):
...
# IMPORTANT: all these operations must be executed in the __init__ of the Widget
......
slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.NodeAddedEvent, self.onNodeAdded)
#####################################################
# Open a file for reading
with open(self.csvFilePath, 'r+b') as csvfileReader:
text = csvfileReader.read()
# Open a file for writing
with open(self.csvFilePath, 'a+b') as csvfile:
csvfile.write("This is what I write")
########################################################################
# Handle user events (mouse, keyboard...)
layoutManager = slicer.app.layoutManager()
sliceNodeCount = slicer.mrmlScene.GetNumberOfNodesByClass('vtkMRMLSliceNode')
for nodeIndex in xrange(sliceNodeCount):
# find the widget for each node in scene
sliceNode = slicer.mrmlScene.GetNthNodeByClass(nodeIndex, 'vtkMRMLSliceNode')
sliceWidget = layoutManager.sliceWidget(sliceNode.GetLayoutName())
if sliceWidget:
        # add observers and keep track of tags
interactor = sliceWidget.sliceView().interactor()
self.sliceWidgetsPerStyle[style] = sliceWidget
events = ("MouseMoveEvent", "EnterEvent", "LeaveEvent")
# See http://www.vtk.org/doc/release/5.0/html/a01252.html for a complete list of VTK events
for event in events:
tag = interactor.AddObserver(event, self.processEvent, self.priority)
self.observerTags.append([interactor,tag])
tag = sliceNode.AddObserver("ModifiedEvent", self.processEvent, self.priority)
self.observerTags.append([sliceNode,tag])
sliceLogic = sliceWidget.sliceLogic()
compositeNode = sliceLogic.GetSliceCompositeNode()
tag = compositeNode.AddObserver("ModifiedEvent", self.processEvent, self.priority)
self.observerTags.append([compositeNode,tag])
# To get the position of the mouse in a left click:
def f(obj, event):
print interactor.GetLastEventPosition()
interactor.AddObserver(vtk.vtkCommand.LeftButtonPressEvent, f)
########################################################################
# Make a batch of changes in the scene and trigger just one event
slicer.mrmlScene.StartState(slicer.mrmlScene.BatchProcessState)
...
...
slicer.mrmlScene.EndState(slicer.mrmlScene.BatchProcessState) => triggers EndBatchProcessEvent
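e.g. wrapping many scene modifications so that observers only react once at the end (sketch; the node class is arbitrary):
slicer.mrmlScene.StartState(slicer.mrmlScene.BatchProcessState)
try:
    for i in range(10):
        n = slicer.mrmlScene.CreateNodeByClass("vtkMRMLScalarVolumeNode")
        slicer.mrmlScene.AddNode(n)
finally:
    slicer.mrmlScene.EndState(slicer.mrmlScene.BatchProcessState)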
########################################################################
# Work with fiducials
vtkMRMLMarkupsFiducialNode --> node type that stores fiducials
markupsLogic = slicer.modules.markups.logic()
# Add new fiducials node
fiducialListNodeID = markupsLogic.AddNewFiducialNode(nodeName,slicer.mrmlScene)
fiducialList = slicer.util.getNode(fiducialListNodeID)
# Add fiducial
position = (-6, 2.5, 100) --> RAS coordinates
index = fiducialList.AddFiducial(*position)
index = fiducialsNode.AddFiducial(-6, 103, -204.5, 'otro') --> alternative
# Modify fiducial
fiducialsNode.SetNthMarkupVisibility(1, False) --> hide fiducial
# Get/set active node that will contain the fiducials
originalActiveListID = markupsLogic.GetActiveListID()
markupsLogic.SetActiveListID(fiducialList)
# Modify visual properties of the fiducials set (example)
displayNode = fiducialList.GetDisplayNode()
displayNode.SetTextScale(6.)
displayNode.SetGlyphScale(6.)
displayNode.SetGlyphTypeFromString('StarBurst2D')
displayNode.SetSelectedColor((1,1,0)) # Enabled fiducials (default)
displayNode.SetColor((1,1,0.4)) # Disabled fiducials
displayNode.SetVisibility(True)
fiducialList.SetAttribute("AssociatedNodeID", associatedNode.GetID()) ???
# Get the position of a fiducial
pos = [0,0,0]
activeNode = markupsLogic.GetActiveListID()
activeNode.GetNthFiducialPosition(0, pos) ==> it stores in pos the RAS coordinates
# Position the 2D windows in a fiducial:
logic.JumpSlicesToNthPointInMarkup(fiducialsNode.GetID(), 0, True)
####################################################################################
# Set the cursor to draw fiducials
# This functionality has been encapsulated in the SlicerUtil "setFiducialMode" method
def setFiducialsMode(isFiducialsMode, keepFiducialsModeOn=False):
""" Activate fiducials mode.
    When isFiducialsMode==True, the mouse cursor will be ready to add fiducials. Also, if
    keepFiducialsModeOn==True, the cursor will stay in fiducials mode until we deactivate it by
    calling setFiducialsMode with isFiducialsMode=False
:param isFiducialsMode: True for "fiducials mode". False for a regular use
:param keepFiducialsModeOn: when True, we can add an unlimited number of fiducials. Otherwise after adding the
first fiducial we will come back to the regular state
"""
applicationLogic = slicer.app.applicationLogic()
selectionNode = applicationLogic.GetSelectionNode()
selectionNode.SetReferenceActivePlaceNodeClassName("vtkMRMLMarkupsFiducialNode")
interactionNode = applicationLogic.GetInteractionNode()
if isFiducialsMode:
# Mouse cursor --> fiducials
interactionNode.SetCurrentInteractionMode(1)
# Persistence depending on if we to keep fiducials (or just one)
interactionNode.SetPlaceModePersistence(keepFiducialsModeOn)
else:
# Regular cursor
interactionNode.SetCurrentInteractionMode(2)
interactionNode.SetPlaceModePersistence(False)
####################################################################################
# Capture fiducials events
fidListNode.AddObserver(fidNode.MarkupAddedEvent, self.onMarkupAdded)
...
def onMarkupAdded(self, markupListNode, event):
# Get the last added markup (there is no index in the event!)
n = markupListNode.GetNumberOfFiducials()
# Change the label of the last added node
markupListNode.SetNthMarkupLabel(n-1, label)
# NOTE: see some very useful event handling here: https://github.com/pieper/LandmarkRegistration/blob/master/RegistrationLib/Landmarks.py#L77-L115
####################################################################################
# XYZ --> RAS (in a ROI). Extracted from SlicerLongitudinalPETCTModuleViewHelper in LongitudinalPETCT Module. There are another useful functions there
def getROIPositionInRAS(roi):
xyz = [0.,0.,0.]
if roi:
roi.GetXYZ(xyz)
xyz = [xyz[0],xyz[1],xyz[2],1.0]
roiTransform = roi.GetParentTransformNode()
if roiTransform:
matrix = vtk.vtkMatrix4x4()
roiTransform.GetMatrixTransformToWorld(matrix)
xyz = matrix.MultiplyDoublePoint(xyz)
xyz = [xyz[0],xyz[1],xyz[2]]
return xyz
####################################################################################
# RAS --> IJK (XYZ). Working with current slice in Axial
layoutManager = slicer.app.layoutManager()
redWidget = layoutManager.sliceWidget('Red')
redNodeSliceNode = redWidget.sliceLogic().GetSliceNode()
scalarVolumeNode = ...
#redNodeSliceNode = redWidget.sliceLogic().GetLabelLayer().GetSliceNode() # Working with the labelmap (optional)
# Get the current slice in RAS coordinates
rasSliceOffset = redNodeSliceNode.GetSliceOffset()
# Get the RAS to IJK transformation matrix to convert RAS-->IJK coordinates
transformationMatrix=vtk.vtkMatrix4x4()
scalarVolumeNode.GetRASToIJKMatrix(transformationMatrix)
# Get the K coordinate (slice number in IJK coordinate)
sliceK = transformationMatrix.MultiplyPoint([0,0,rasSliceOffset,1])[2]
# Alternative way: through sliceWidget.sliceView, but it doesn't seem to work well
####################################################################################
# Show a popup message in the main window
qt.QMessageBox.warning(slicer.util.mainWindow(), 'My Warning', 'This is a warning message')
#.information, etc.
####################################################################################
# Print debug messages in console (general, not Python one)
import logging
logging.info('This is an info message')
####################################################################################
# Create new instance of EditorWidget
editorWidgetParent = slicer.qMRMLWidget()
editorWidgetParent.setLayout(qt.QVBoxLayout())
editorWidgetParent.setMRMLScene(slicer.mrmlScene)
editorWidgetParent.hide()
self.editorWidget = EditorWidget(editorWidgetParent, False)
self.editorWidget.setup()
####################################################################################
# Go to a specific module
m = slicer.util.mainWindow()
m.moduleSelector().selectModule('ModelMaker')
###
# Iterate over the different 2D windows and change opacity
nodes = slicer.mrmlScene.GetNodesByClass("vtkMRMLSliceCompositeNode")
# Call necessary to allow the iteration.
nodes.InitTraversal()
# Get the first CompositeNode (typically Red)
compositeNode = nodes.GetNextItemAsObject()
# Link the nodes by default
while compositeNode:
compositeNode.SetLinkedControl(True)
    compositeNode.SetLabelOpacity(0.5) # so that the structures are visible
compositeNode = nodes.GetNextItemAsObject()
####################################################################################
# WORKING WITH CLIs
# Call the CLI
parameters = {"command": command}
module = slicer.modules.executesystemcommand
self.commandLineModuleNode = slicer.cli.run(module, None, parameters, wait_for_completion=False)
# Get the result
self.commandLineModuleNode.AddObserver('ModifiedEvent', self.__onExecuteCommandCLIStateUpdated__)
...
def __onExecuteCommandCLIStateUpdated__(self, caller, event):
if caller.IsA('vtkMRMLCommandLineModuleNode'):
if caller.GetStatusString() == "Completed":
# IMPORTANT: this is not necessarily executed just once!
print("CLI Process complete")
# If you want to get some output values:
myOutputParam = self.commandLineModuleNode.GetParameterDefault(0,1) # Get the parameter 1 in the group 0
elif caller.GetStatusString() == "Completed with errors":
# IMPORTANT: this is not necessarily executed just once!
print("CLI Process FAILED")
# In order that parameter/s output works, we should do this in the CLI:
#include <fstream>
....
std::ofstream writeFile (returnParameterFile.c_str());
writeFile << "output = " << valueThatIWantToReturn << std::endl;
writeFile.close();
####################################################################################
# Camera node selector
cameraNodeSelector = slicer.qMRMLNodeComboBox()
cameraNodeSelector.objectName = 'cameraNodeSelector'
cameraNodeSelector.toolTip = "Select a camera that will fly along this path."
cameraNodeSelector.nodeTypes = ['vtkMRMLCameraNode']
cameraNodeSelector.noneEnabled = False
cameraNodeSelector.addEnabled = False
cameraNodeSelector.removeEnabled = False
cameraNodeSelector.connect('currentNodeChanged(bool)', self.enableOrDisableCreateButton)
cameraNodeSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.setCameraNode)
pathFormLayout.addRow("Camera:", cameraNodeSelector)
self.parent.connect('mrmlSceneChanged(vtkMRMLScene*)',
cameraNodeSelector, 'setMRMLScene(vtkMRMLScene*)')
####################################################################
# Getting cursor position
widget = slicer.app.layoutManager().sliceWidget('Red')
interactor = widget.interactorStyle().GetInteractor()
crosshairNode=slicer.util.getNode('Crosshair')
from CIP.logic.SlicerUtil import SlicerUtil
from CIP.logic import Util
v = SlicerUtil.getFirstScalarNode()
def f(arg1, arg2):
coords = [0,0, 0]
crosshairNode.GetCursorPositionRAS(coords)
print "RAS: ", coords
print "Converted:", Util.ras_to_ijk(v, coords)
interactor.AddObserver("LeftButtonPressEvent", f)
| 43.948081 | 219 | 0.678874 |
f69f2c0f028a117447e65c43b39bbc23b09fac4d | 7,858 | py | Python | src/rosdistro/release_build_file.py | andre-rosa/rosdistro-1 | 62b79df2adc466ec0ea239e9210dcb26cac558ab | [
"BSD-3-Clause"
] | 742 | 2017-07-05T02:49:36.000Z | 2022-03-30T12:55:43.000Z | src/rosdistro/release_build_file.py | andre-rosa/rosdistro-1 | 62b79df2adc466ec0ea239e9210dcb26cac558ab | [
"BSD-3-Clause"
] | 94 | 2015-01-09T19:45:10.000Z | 2022-03-22T18:44:49.000Z | src/rosdistro/release_build_file.py | andre-rosa/rosdistro-1 | 62b79df2adc466ec0ea239e9210dcb26cac558ab | [
"BSD-3-Clause"
] | 425 | 2017-07-04T22:03:29.000Z | 2022-03-29T06:59:06.000Z | # Software License Agreement (BSD License)
#
# Copyright (c) 2013, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class ReleaseBuildFile(object):
_type = 'release-build'
def __init__(self, name, data):
self.name = name
assert 'type' in data, "Expected file type is '%s'" % ReleaseBuildFile._type
assert data['type'] == ReleaseBuildFile._type, "Expected file type is '%s', not '%s'" % (ReleaseBuildFile._type, data['type'])
assert 'version' in data, "Release build file for '%s' lacks required version information" % self.name
assert int(data['version']) == 1, "Unable to handle '%s' format version '%d', please update rosdistro (e.g. on Ubuntu/Debian use: sudo apt-get update && sudo apt-get install --only-upgrade python-rosdistro)" % (ReleaseBuildFile._type, int(data['version']))
self.version = int(data['version'])
self.package_whitelist = []
if 'package_whitelist' in data:
self.package_whitelist = data['package_whitelist']
assert isinstance(self.package_whitelist, list)
self.package_blacklist = []
if 'package_blacklist' in data:
self.package_blacklist = data['package_blacklist']
assert isinstance(self.package_blacklist, list)
self.notify_emails = []
self.notify_maintainers = None
self.notify_committers = None
if 'notifications' in data:
if 'emails' in data['notifications']:
self.notify_emails = data['notifications']['emails']
assert isinstance(self.notify_emails, list)
if 'maintainers' in data['notifications'] and data['notifications']['maintainers']:
self.notify_maintainers = True
if 'committers' in data['notifications'] and data['notifications']['committers']:
self.notify_committers = True
assert 'targets' in data
self._targets = {}
for os_name in data['targets'].keys():
if os_name == '_config':
self._targets[os_name] = data['targets'][os_name]
continue
self._targets[os_name] = {}
for os_code_name in data['targets'][os_name]:
if os_code_name == '_config':
self._targets[os_name][os_code_name] = data['targets'][os_name][os_code_name]
continue
self._targets[os_name][os_code_name] = {}
for arch in data['targets'][os_name][os_code_name]:
self._targets[os_name][os_code_name][arch] = data['targets'][os_name][os_code_name][arch]
assert 'jenkins_url' in data
self.jenkins_url = str(data['jenkins_url'])
self.jenkins_sourcedeb_job_timeout = None
if 'jenkins_sourcedeb_job_timeout' in data:
self.jenkins_sourcedeb_job_timeout = int(data['jenkins_sourcedeb_job_timeout'])
self.jenkins_binarydeb_job_timeout = None
if 'jenkins_binarydeb_job_timeout' in data:
self.jenkins_binarydeb_job_timeout = int(data['jenkins_binarydeb_job_timeout'])
self.sync_package_count = None
self.sync_packages = []
if 'sync' in data:
if 'package_count' in data['sync']:
self.sync_package_count = int(data['sync']['package_count'])
if 'packages' in data['sync']:
                self.sync_packages = data['sync']['packages']
assert isinstance(self.sync_packages, list)
def get_target_os_names(self):
return [t for t in self._targets.keys() if t != '_config']
def get_target_os_code_names(self, os_name):
os_code_names = self._targets[os_name]
return [t for t in os_code_names.keys() if t != '_config']
def get_target_arches(self, os_name, os_code_name):
arches = self._targets[os_name][os_code_name]
return [t for t in arches.keys() if t != '_config']
def get_target_configuration(self, os_name=None, os_code_name=None, arch=None):
assert os_code_name is not None or arch is None
assert os_name is not None or os_code_name is None
config = {}
if '_config' in self._targets:
config.update(self._targets['_config'])
if os_name is not None:
if '_config' in self._targets[os_name]:
config.update(self._targets[os_name]['_config'])
if os_code_name is not None:
if '_config' in self._targets[os_name][os_code_name]:
config.update(self._targets[os_name][os_code_name]['_config'])
if arch is not None:
if '_config' in self._targets[os_name][os_code_name][arch]:
config.update(self._targets[os_name][os_code_name][arch]['_config'])
return config
def get_data(self):
data = {}
data['type'] = ReleaseBuildFile._type
data['version'] = 1
if self.package_whitelist:
data['package_whitelist'] = self.package_whitelist
if self.package_blacklist:
data['package_blacklist'] = self.package_blacklist
if self.notify_emails or self.notify_maintainers or self.notify_committers:
data['notifications'] = {}
if self.notify_emails:
data['notifications']['emails'] = self.notify_emails
if self.notify_maintainers is not None:
data['notifications']['maintainers'] = bool(self.notify_maintainers)
if self.notify_committers is not None:
data['notifications']['committers'] = bool(self.notify_committers)
data['targets'] = self._targets
data['jenkins_url'] = self.jenkins_url
if self.jenkins_sourcedeb_job_timeout:
data['jenkins_sourcedeb_job_timeout'] = self.jenkins_sourcedeb_job_timeout
if self.jenkins_binarydeb_job_timeout:
data['jenkins_binarydeb_job_timeout'] = self.jenkins_binarydeb_job_timeout
if self.sync_package_count or self.sync_packages:
data['sync'] = {}
if self.sync_package_count is not None:
data['sync']['package_count'] = self.sync_package_count
if self.sync_packages:
data['sync']['packages'] = self.sync_packages
return data
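# A minimal usage sketch (illustration only; the URLs and target names below are
# hypothetical). It shows how get_target_configuration() merges the '_config'
# entries from the global and OS levels for a concrete os/code-name/arch triple.
if __name__ == '__main__':
    example_data = {
        'type': 'release-build',
        'version': 1,
        'jenkins_url': 'http://jenkins.example.com',
        'targets': {
            '_config': {'apt_mirror': 'http://mirror.example.com/ubuntu'},
            'ubuntu': {
                '_config': {'apt_target_repository': 'http://repo.example.com/ubuntu'},
                'trusty': {'amd64': {}},
            },
        },
    }
    build_file = ReleaseBuildFile('example', example_data)
    print(build_file.get_target_arches('ubuntu', 'trusty'))  # ['amd64']
    # merged dict containing both 'apt_mirror' and 'apt_target_repository'
    print(build_file.get_target_configuration('ubuntu', 'trusty', 'amd64'))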
| 47.624242 | 264 | 0.654747 |
5955f55736235a1a6b5886b323add5d5893e7fc6 | 1,196 | py | Python | 04_5-push_button_control_led/push_button_control_led.py | raspberrypi-tw/gpio-game-console | 5319addec034dae72bf829e5873626b00b69e3d5 | [
"BSD-3-Clause"
] | 13 | 2016-04-17T07:23:38.000Z | 2021-04-20T04:54:26.000Z | 04_5-push_button_control_led/push_button_control_led.py | raspberrypi-tw/gpio-game-console | 5319addec034dae72bf829e5873626b00b69e3d5 | [
"BSD-3-Clause"
] | null | null | null | 04_5-push_button_control_led/push_button_control_led.py | raspberrypi-tw/gpio-game-console | 5319addec034dae72bf829e5873626b00b69e3d5 | [
"BSD-3-Clause"
] | 16 | 2016-05-05T04:33:06.000Z | 2021-04-23T05:42:08.000Z | #!/usr/bin/python3
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#|R|a|s|p|b|e|r|r|y|P|i|.|c|o|m|.|t|w|
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# Copyright (c) 2021, raspberrypi.com.tw
# All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# push_button_control_led.py
# Turn on the led when push button is pressed with interrupt way, and
# de-bounces by software
#
# Author : sosorry
# Date : 06/22/2014
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
BTN_PIN = 11
LED_PIN = 12
WAIT_TIME = 200
status = GPIO.LOW
GPIO.setup(BTN_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(LED_PIN, GPIO.OUT, initial=status)
def mycallback(channel):
print("Button pressed @", time.ctime())
global status
if status == GPIO.LOW:
status = GPIO.HIGH
else:
status = GPIO.LOW
GPIO.output(LED_PIN, status)
try:
GPIO.add_event_detect(BTN_PIN, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
while True:
time.sleep(10)
except KeyboardInterrupt:
print("Exception: KeyboardInterrupt")
finally:
GPIO.cleanup()
| 24.916667 | 88 | 0.623746 |
6f2ea9dbb3ec4c985239c5a6ee455f74dad8baac | 1,880 | py | Python | clients/python/tyckiting_client/ai/himanshu.py | HimanshuSingal/space_mission | 7d032f02b6144f412e23cd012d87965f68484fea | [
"MIT"
] | null | null | null | clients/python/tyckiting_client/ai/himanshu.py | HimanshuSingal/space_mission | 7d032f02b6144f412e23cd012d87965f68484fea | [
"MIT"
] | null | null | null | clients/python/tyckiting_client/ai/himanshu.py | HimanshuSingal/space_mission | 7d032f02b6144f412e23cd012d87965f68484fea | [
"MIT"
] | null | null | null | import random
from tyckiting_client.ai import base
from tyckiting_client import actions
class Ai(base.BaseAi):
"""
Dummy bot that moves randomly around the board.
"""
def respond(self, bot, events):
for e in events:
if e.event == "radarEcho":
cannon_pos = e.pos;
return actions.Cannon(bot_id=bot.bot_id,
x=cannon_pos.x,
y=cannon_pos.y)
if e.event == "detected":
valid_moves = list(self.get_valid_moves(bot))
far_moves = []
for vm in valid_moves:
if abs(vm.x - bot.pos.x) > 1 or abs(vm.y - bot.pos.y) > 1:
far_moves.append(vm);
move_pos = random.choice(far_moves);
return actions.Move(bot_id=bot.bot_id,
x=move_pos.x,
y=move_pos.y)
radar_pos = random.choice(list(self.get_valid_radars(bot)));
return actions.Radar(bot_id=bot.bot_id,
x=radar_pos.x,
y=radar_pos.y)
def move(self, bots, events):
"""
Move the bot to a random legal positon.
Args:
bots: List of bot states for own team
events: List of events form previous round
Returns:
List of actions to perform this round.
"""
response = []
for bot in bots:
if not bot.alive:
continue
response.append(self.respond(bot, events))
return response
| 30.819672 | 78 | 0.434574 |
18fddec2ae7a32a91ffdcf9adff59458bb9b6139 | 4,489 | py | Python | python/openapi_client/models/__init__.py | Mastercard/mcapi_oauth_encryption_tutorial | 0c24f778ad57a867eefd8aad44466a49f3f89826 | [
"MIT"
] | 26 | 2019-08-15T10:48:16.000Z | 2022-03-03T21:57:52.000Z | python/openapi_client/models/__init__.py | Mastercard/mcapi_oauth_encryption_tutorial | 0c24f778ad57a867eefd8aad44466a49f3f89826 | [
"MIT"
] | 12 | 2019-12-30T08:36:00.000Z | 2022-03-29T22:37:50.000Z | python/openapi_client/models/__init__.py | Mastercard/mcapi_oauth_encryption_tutorial | 0c24f778ad57a867eefd8aad44466a49f3f89826 | [
"MIT"
] | 36 | 2019-08-14T14:27:35.000Z | 2022-02-13T18:02:36.000Z | # flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from openapi_client.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
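# For example (sketch only; the limit value is arbitrary, not a recommendation):
# import sys
# sys.setrecursionlimit(3000)
# import openapi_client.models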
from openapi_client.model.account_holder_data import AccountHolderData
from openapi_client.model.account_holder_data_outbound import AccountHolderDataOutbound
from openapi_client.model.asset_response_schema import AssetResponseSchema
from openapi_client.model.authentication_methods import AuthenticationMethods
from openapi_client.model.billing_address import BillingAddress
from openapi_client.model.card_account_data_inbound import CardAccountDataInbound
from openapi_client.model.card_account_data_outbound import CardAccountDataOutbound
from openapi_client.model.decisioning_data import DecisioningData
from openapi_client.model.delete_request_schema import DeleteRequestSchema
from openapi_client.model.delete_response_schema import DeleteResponseSchema
from openapi_client.model.encrypted_payload import EncryptedPayload
from openapi_client.model.encrypted_payload_transact import EncryptedPayloadTransact
from openapi_client.model.error import Error
from openapi_client.model.errors_response import ErrorsResponse
from openapi_client.model.funding_account_data import FundingAccountData
from openapi_client.model.funding_account_info import FundingAccountInfo
from openapi_client.model.funding_account_info_encrypted_payload import FundingAccountInfoEncryptedPayload
from openapi_client.model.gateway_error import GatewayError
from openapi_client.model.gateway_errors_response import GatewayErrorsResponse
from openapi_client.model.gateway_errors_schema import GatewayErrorsSchema
from openapi_client.model.get_task_status_request_schema import GetTaskStatusRequestSchema
from openapi_client.model.get_task_status_response_schema import GetTaskStatusResponseSchema
from openapi_client.model.get_token_request_schema import GetTokenRequestSchema
from openapi_client.model.get_token_response_schema import GetTokenResponseSchema
from openapi_client.model.media_content import MediaContent
from openapi_client.model.notify_token_encrypted_payload import NotifyTokenEncryptedPayload
from openapi_client.model.notify_token_updated_request_schema import NotifyTokenUpdatedRequestSchema
from openapi_client.model.notify_token_updated_response_schema import NotifyTokenUpdatedResponseSchema
from openapi_client.model.phone_number import PhoneNumber
from openapi_client.model.product_config import ProductConfig
from openapi_client.model.search_tokens_request_schema import SearchTokensRequestSchema
from openapi_client.model.search_tokens_response_schema import SearchTokensResponseSchema
from openapi_client.model.suspend_request_schema import SuspendRequestSchema
from openapi_client.model.suspend_response_schema import SuspendResponseSchema
from openapi_client.model.token import Token
from openapi_client.model.token_detail import TokenDetail
from openapi_client.model.token_detail_data import TokenDetailData
from openapi_client.model.token_detail_data_get_token_only import TokenDetailDataGetTokenOnly
from openapi_client.model.token_detail_data_par_only import TokenDetailDataPAROnly
from openapi_client.model.token_detail_get_token_only import TokenDetailGetTokenOnly
from openapi_client.model.token_detail_par_only import TokenDetailPAROnly
from openapi_client.model.token_for_get_token import TokenForGetToken
from openapi_client.model.token_for_lcm import TokenForLCM
from openapi_client.model.token_for_ntu import TokenForNTU
from openapi_client.model.token_info import TokenInfo
from openapi_client.model.token_info_for_ntu_and_get_token import TokenInfoForNTUAndGetToken
from openapi_client.model.tokenize_request_schema import TokenizeRequestSchema
from openapi_client.model.tokenize_response_schema import TokenizeResponseSchema
from openapi_client.model.transact_encrypted_data import TransactEncryptedData
from openapi_client.model.transact_error import TransactError
from openapi_client.model.transact_request_schema import TransactRequestSchema
from openapi_client.model.transact_response_schema import TransactResponseSchema
from openapi_client.model.un_suspend_request_schema import UnSuspendRequestSchema
from openapi_client.model.un_suspend_response_schema import UnSuspendResponseSchema
| 68.015152 | 106 | 0.908443 |
cf7b4459e0c08b1ca6126becd13407b5cfed3ac1 | 68 | py | Python | main.py | nickfullerton/BasketballStatTracker | 6c321154cfc3cb51eb501edeacaf2d38b7c716e5 | [
"MIT"
] | null | null | null | main.py | nickfullerton/BasketballStatTracker | 6c321154cfc3cb51eb501edeacaf2d38b7c716e5 | [
"MIT"
] | null | null | null | main.py | nickfullerton/BasketballStatTracker | 6c321154cfc3cb51eb501edeacaf2d38b7c716e5 | [
"MIT"
] | null | null | null | from src.MainMenu import *
main_menu = MainMenu()
main_menu.run()
| 11.333333 | 26 | 0.735294 |
7e35cf5b2efda8b13ed15df33ca283308d78efff | 3,106 | py | Python | lib/svtplay_dl/service/dr.py | mrcarlberg/svtplay-dl | e92ebbbcab2d529fe5dca25b5c3195b2b2fdc4e8 | [
"MIT"
] | null | null | null | lib/svtplay_dl/service/dr.py | mrcarlberg/svtplay-dl | e92ebbbcab2d529fe5dca25b5c3195b2b2fdc4e8 | [
"MIT"
] | 1 | 2020-09-04T18:09:05.000Z | 2020-09-04T18:09:05.000Z | lib/svtplay_dl/service/dr.py | magic75/svtplay-dl | 6bde2e76bfe3a0bb57ed902055d341aec528c9f2 | [
"MIT"
] | null | null | null | import copy
import json
import logging
import re
import uuid
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
from svtplay_dl.subtitle import subtitle
class Dr(Service, OpenGraphThumbMixin):
supported_domains = ["dr.dk"]
def get(self):
data = self.get_urldata()
match = re.search("__data = ([^<]+)</script>", data)
if not match:
yield ServiceError("Cant find info for this video")
return
janson = json.loads(match.group(1))
page = janson["cache"]["page"][list(janson["cache"]["page"].keys())[0]]
offers = page["entries"][0]["item"]["offers"]
resolution = None
vid = None
for i in offers:
if i["deliveryType"] == "Stream":
vid = i["scopes"][0]
resolution = i["resolution"]
deviceid = uuid.uuid1()
res = self.http.request(
"post",
"https://isl.dr-massive.com/api/authorization/anonymous-sso?device=web_browser&ff=idp%2Cldp&lang=da",
json={"deviceId": str(deviceid), "scopes": ["Catalog"], "optout": True},
)
token = res.json()[0]["value"]
url = "https://isl.dr-massive.com/api/account/items/{}/videos?delivery=stream&device=web_browser&ff=idp%2Cldp&lang=da&resolution={}&sub=Anonymous".format(
vid, resolution
)
res = self.http.request("get", url, headers={"authorization": "Bearer {}".format(token)})
for video in res.json():
if video["accessService"] == "StandardVideo":
if video["format"] == "video/hls":
res = self.http.request("get", video["url"])
if res.status_code > 400:
yield ServiceError("Can't play this because the video is geoblocked or not available.")
else:
streams = hlsparse(self.config, res, video["url"], output=self.output)
for n in list(streams.keys()):
yield streams[n]
yield subtitle(copy.copy(self.config), "wrst", video["subtitles"][0]["link"], output=self.output)
def find_all_episodes(self, config):
episodes = []
data = self.get_urldata()
match = re.search("__data = ([^<]+)</script>", data)
if not match:
logging.error("Can't find video info.")
return episodes
janson = json.loads(match.group(1))
page = janson["cache"]["page"][list(janson["cache"]["page"].keys())[0]]
item = page["entries"][0]["item"]
if "season" in item:
entries = item["season"]["episodes"]["items"]
for i in entries:
episodes.append("https://www.dr.dk/drtv{}".format(i["watchPath"]))
if config.get("all_last") != -1:
episodes = episodes[: config.get("all_last")]
else:
episodes.reverse()
return episodes
| 39.820513 | 162 | 0.55924 |
1eb6832a283b007ce139b21be137fc3c4baaec6f | 6,996 | py | Python | setting.py | ankworld/Rpi_Therapy | e6fa454098a3947d29e469b4bbacb26fe322552f | [
"MIT"
] | null | null | null | setting.py | ankworld/Rpi_Therapy | e6fa454098a3947d29e469b4bbacb26fe322552f | [
"MIT"
] | null | null | null | setting.py | ankworld/Rpi_Therapy | e6fa454098a3947d29e469b4bbacb26fe322552f | [
"MIT"
] | null | null | null | import sys
import configparser
import re
import os
from PyQt5.QtWidgets import QApplication, QMainWindow, QLineEdit, QGroupBox, QMessageBox
from ui import controlpanel
class Config(object):
def __init__(self):
self.path = os.path.dirname(os.path.abspath(__file__))
self.list_of_value = []
def write_config_file(self, section, options):
cfg = configparser.ConfigParser()
cfg.read(self.path + '/config/motor.ini')
cfg[section] = {}
cfg[section]['high_pin'] = options[0]
cfg[section]['low_pin'] = options[1]
cfg[section]['freq'] = options[2]
with open(self.path + '/config/motor.ini', 'w') as configfile:
cfg.write(configfile)
def write_config_sensor(self, section, options):
cfg = configparser.ConfigParser()
cfg.read(self.path + '/config/sensor.ini')
cfg[section] = {}
cfg[section]['input_1'] = options[0]
cfg[section]['input_2'] = options[1]
cfg[section]['input_3'] = options[2]
cfg[section]['input_4'] = options[3]
cfg[section]['input_5'] = options[4]
cfg[section]['input_6'] = options[5]
with open(self.path + '/config/sensor.ini', 'w') as configfile:
cfg.write(configfile)
def read_config_file(self, section):
self.list_of_value = []
cfg = configparser.ConfigParser()
cfg.read(self.path + '/config/motor.ini')
try:
for option in cfg[section].values():
if option != "":
self.list_of_value.append(int(float(option)))
else:
self.list_of_value.append(0)
except KeyError:
pass
return self.list_of_value
def read_config_sensor(self, section):
self.list_of_value = []
cfg = configparser.ConfigParser()
cfg.read(self.path + '/config/sensor.ini')
try:
for option in cfg[section].values():
if option != "":
self.list_of_value.append(int(float(option)))
else:
self.list_of_value.append(0)
except KeyError:
pass
return self.list_of_value
class Ui(object):
def __init__(self):
self.list_used_pin = []
self.high_pin = 0
self.low_pin = 0
self.freq = 0
self.window = QMainWindow()
self.uic = controlpanel.Ui_ControlPanelMW()
self.uic.setupUi(self.window)
self.load()
self.uic.saveMotor1.clicked.connect(self.save)
self.uic.saveMotor2.clicked.connect(self.save)
self.uic.saveSensor.clicked.connect(self.save_sensor)
self.uic.saveAll.clicked.connect(self.save_all)
self.uic.loadAll.clicked.connect(self.load)
self.window.show()
def unique_pin(self):
self.list_used_pin = []
self.list_used_pin.append(self.uic.highPinM1.text())
self.list_used_pin.append(self.uic.highPinM2.text())
self.list_used_pin.append(self.uic.lowPinM1.text())
self.list_used_pin.append(self.uic.lowPinM2.text())
self.list_used_pin.append(self.uic.input1.text())
self.list_used_pin.append(self.uic.input2.text())
self.list_used_pin.append(self.uic.input3.text())
self.list_used_pin.append(self.uic.input4.text())
self.list_used_pin.append(self.uic.input5.text())
self.list_used_pin.append(self.uic.input6.text())
print(len(self.list_used_pin))
print(len(set(self.list_used_pin)))
if len(self.list_used_pin) > len(set(self.list_used_pin)):
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Error : Duplicate Pin")
msg.setWindowTitle("Error")
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
return False
else:
return True
def save(self):
if self.unique_pin():
sender = self.window.sender()
list_of_line_edit = sender.parent().findChildren(QLineEdit)
section = sender.parent().title()
for item in list_of_line_edit:
reg_name = re.sub(r'\w\d', r'', item.objectName())
if reg_name == "highPin":
self.high_pin = item.text()
elif reg_name == "lowPin":
self.low_pin = item.text()
elif reg_name == "freqPin":
self.freq = item.text()
list_of_pin = [self.high_pin, self.low_pin, self.freq]
Config().write_config_file(section, list_of_pin)
def save_sensor(self):
if self.unique_pin():
section = "Sensor"
list_of_input = [self.uic.input1.text(), self.uic.input2.text(), self.uic.input3.text(),
self.uic.input4.text(), self.uic.input5.text(), self.uic.input6.text()]
Config().write_config_sensor(section, list_of_input)
def load(self):
name_gb = []
for gb in self.uic.centralwidget.findChildren(QGroupBox):
name_gb.append(gb.objectName())
name_gb.sort()
i = 0
sorted_gb = []
while i < 2:
for gb in self.uic.centralwidget.findChildren(QGroupBox):
if gb.objectName() == name_gb[i]:
sorted_gb.append(gb)
i += 1
break
section = ""
for i in range(2):
section = "Motor " + str(i + 1)
value = Config().read_config_file(section)
if len(value) == 0:
pass
else:
gb = sorted_gb[i]
list_of_line_edit = gb.findChildren(QLineEdit)
for item in list_of_line_edit:
reg_name = re.sub(r'\w\d', r'', item.objectName())
if reg_name == "highPin":
item.setText(str(value[0]))
elif reg_name == "lowPin":
item.setText(str(value[1]))
elif reg_name == "freqPin":
item.setText(str(value[2]))
##########################################################################
# Load to sensor section
section = "Sensor"
value = Config().read_config_sensor(section)
if len(value) == 0:
pass
else:
self.uic.input1.setText(str(value[0]))
self.uic.input2.setText(str(value[1]))
self.uic.input3.setText(str(value[2]))
self.uic.input4.setText(str(value[3]))
self.uic.input5.setText(str(value[4]))
self.uic.input6.setText(str(value[5]))
def save_all(self):
self.uic.saveMotor1.click()
self.uic.saveMotor2.click()
self.uic.saveSensor.click()
if __name__ == "__main__":
app = QApplication(sys.argv)
app.setStyle("fusion")
ex = Ui()
sys.exit(app.exec_())
| 34.126829 | 100 | 0.555031 |
391ec6b8f1f979e196436b05bf9f617179fc62e8 | 3,164 | py | Python | py4cytoscape/__init__.py | bdemchak/PyCy3 | 4058e41689c78304812b1e6fd8371d797cbb6b5b | [
"MIT"
] | 1 | 2020-02-10T12:50:35.000Z | 2020-02-10T12:50:35.000Z | py4cytoscape/__init__.py | bdemchak/PyCy3 | 4058e41689c78304812b1e6fd8371d797cbb6b5b | [
"MIT"
] | 2 | 2020-02-14T21:19:27.000Z | 2020-04-21T21:30:26.000Z | py4cytoscape/__init__.py | bdemchak/PyCy3 | 4058e41689c78304812b1e6fd8371d797cbb6b5b | [
"MIT"
] | 1 | 2020-02-10T17:16:17.000Z | 2020-02-10T17:16:17.000Z | # -*- coding:utf-8 -*-
""" Interface for Py4Cytoscape.
"""
"""Copyright 2020 The Cytoscape Consortium
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from .networks import *
from .session import *
from .layouts import *
from .network_selection import *
from .tables import *
from .commands import *
from .cytoscape_system import *
from .apps import *
from .collections import *
from .filters import *
from .groups import *
from .tools import *
from .user_interface import *
from .network_views import *
from .styles import *
from .style_mappings import *
from .style_auto_mappings import *
from .style_defaults import *
from .style_values import *
from .style_dependencies import *
from .style_bypasses import *
from .py4cytoscape_utils import *
from .cy_ndex import *
from .decorators import *
from .py4cytoscape_notebook import get_browser_client_js, get_browser_client_channel, get_jupyter_bridge_url, get_notebook_is_running, set_notebook_is_running
from .py4cytoscape_logger import set_summary_logger
from .sandbox import *
from .py4cytoscape_sandbox import *
from .py4cytoscape_tuning import set_catchup_filter_secs, set_catchup_network_secs, set_model_propagation_secs
from ._version import __version__
from .notebook import *
from .annotations import *
# Note that we have tried to enforce documentation standards for modules and private functions per:
# https://www.python.org/dev/peps/pep-0257/ and https://www.python.org/dev/peps/pep-0008/#comments
# https://google.github.io/styleguide/pyguide.html
# TODO: Remember to set __all__ to enumerate what modules are exported from this package. Do this at the module level, too, for functions ... consider using a decorator to fill the __all__ per https://stackoverflow.com/questions/44834/can-someone-explain-all-in-python
# TODO: Add type hints per https://www.python.org/dev/peps/pep-0484/ for all functions
# TODO: Remember to execute pylint: https://stackoverflow.com/questions/38134086/how-to-run-pylint-with-pycharm
# Note that use of "import" statement project wide per advice of:
# https://stackoverflow.com/questions/44834/can-someone-explain-all-in-python
# http://effbot.org/zone/import-confusion.htm
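# A minimal sketch of the decorator-based approach mentioned in the __all__ TODO
# above (illustration only, not currently part of this package): each decorated
# object appends its own name to the defining module's __all__.
def _export(obj):
    import sys as _sys
    _module = _sys.modules[obj.__module__]
    if hasattr(_module, '__all__'):
        if obj.__name__ not in _module.__all__:
            _module.__all__.append(obj.__name__)
    else:
        _module.__all__ = [obj.__name__]
    return obj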
| 46.529412 | 268 | 0.795828 |
566ba87d23782110e61a2b1fe550c5f8534aed46 | 7,613 | py | Python | sciapp/action/advanced/filter.py | pengguanjun/imagepy | d96ef98c2c3e93d368131fd2753bce164e1247cd | [
"BSD-4-Clause"
] | 1 | 2020-08-17T04:18:35.000Z | 2020-08-17T04:18:35.000Z | sciapp/action/advanced/filter.py | pengguanjun/imagepy | d96ef98c2c3e93d368131fd2753bce164e1247cd | [
"BSD-4-Clause"
] | null | null | null | sciapp/action/advanced/filter.py | pengguanjun/imagepy | d96ef98c2c3e93d368131fd2753bce164e1247cd | [
"BSD-4-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 2 23:48:33 2016
@author: yxl
"""
import threading
import numpy as np
from time import time, sleep
def process_channels(plg, ips, src, des, para):
if ips.channels>1 and not 'not_channel' in plg.note:
for i in range(ips.channels):
rst = plg.run(ips, src if src is None else src[:,:,i], des[:,:,i], para)
if not rst is des and not rst is None:
des[:,:,i] = rst
else:
rst = plg.run(ips, src, des, para)
if not rst is des and not rst is None:
des[:] = rst
return des
def process_one(plg, ips, src, img, para, callafter=None):
plg.app.add_task(plg)
start = time()
transint = '2int' in plg.note and ips.dtype in (np.uint8, np.uint16)
transfloat = '2float' in plg.note and not ips.dtype in (np.complex128, np.float32, np.float64)
if transint:
buf = img.astype(np.int32)
src = src.astype(np.int32)
if transfloat:
buf = img.astype(np.float32)
src = src.astype(np.float32)
rst = process_channels(plg, ips, src, buf if transint or transfloat else img, para)
if not img is rst and not rst is None:
imgrange = {np.uint8:(0,255), np.uint16:(0, 65535)}[img.dtype.type]
np.clip(rst, imgrange[0], imgrange[1], out=img)
if 'auto_msk' in plg.note and not ips.mask('out') is None:
msk = ips.mask('out')
img[msk] = src[msk]
plg.app.info('%s: cost %.3fs'%(ips.title, time()-start))
ips.update()
plg.app.remove_task(plg)
if not callafter is None:callafter()
def process_stack(plg, ips, src, imgs, para, callafter=None):
plg.app.add_task(plg)
start = time()
transint = '2int' in plg.note and ips.dtype in (np.uint8, np.uint16)
transfloat = '2float' in plg.note and not ips.dtype in (np.complex128, np.float32, np.float64)
if transint:
buf = imgs[0].astype(np.int32)
src = src.astype(np.int32)
elif transfloat:
buf = imgs[0].astype(np.float32)
src = src.astype(np.float32)
else: src = src * 1
for i,n in zip(imgs,list(range(len(imgs)))):
#sleep(0.5)
plg.progress(n, len(imgs))
if 'auto_snap' in plg.note : src[:] = i
if transint or transfloat: buf[:] = i
rst = process_channels(plg, ips, src, buf if transint or transfloat else i, para)
if not i is rst and not rst is None:
imgrange = {np.uint8:(0,255), np.uint16:(0,65535)}[i.dtype.type]
np.clip(rst, imgrange[0], imgrange[1], out=i)
if 'auto_msk' in plg.note and not ips.mask() is None:
msk = ips.mask('out')
i[msk] = src[msk]
plg.app.info('%s: cost %.3fs'%(ips.title, time()-start))
ips.update()
plg.app.remove_task(plg)
if not callafter is None:callafter()
class Filter:
title = 'Filter'
modal = True
note = []
'all, 8-bit, 16-bit, int, rgb, float, not_channel, not_slice, req_roi, auto_snap, auto_msk, preview, 2int, 2float'
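    # A minimal sketch of a concrete plugin (illustration only; the class name is
    # hypothetical). A subclass overrides `title`, `note` and `run`, and the flags
    # listed above control snapshotting, masking, previewing and type handling:
    #
    # class InvertDemo(Filter):
    #     title = 'Invert Demo'
    #     note = ['all', 'auto_snap', 'auto_msk', 'preview']
    #     def run(self, ips, snap, img, para=None):
    #         return 255 - snap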
para = None
view = None
prgs = None
def __init__(self, ips=None): pass
def progress(self, i, n): self.prgs = int(i*100/n)
def show(self):
preview = lambda para, ips=self.ips: self.preview(ips, para) or ips.update()
return self.app.show_para(self.title, self.view, self.para, preview,
on_ok=lambda : self.ok(self.ips), on_help=self.on_help,
on_cancel=lambda : self.cancel(self.ips) or self.ips.update(),
preview='preview' in self.note, modal=self.modal)
def run(self, ips, snap, img, para = None):
return 255-img
def check(self, ips):
note = self.note
if ips == None:
return self.app.alert('No image opened!')
return False
elif 'req_roi' in note and ips.roi == None:
return self.app.alert('No Roi found!')
elif not 'all' in note:
if ips.dtype==np.uint8 and ips.channels==3 and not 'rgb' in note:
return self.app.alert('Do not surport rgb image')
elif ips.dtype==np.uint8 and ips.channels==1 and not '8-bit' in note:
return self.app.alert('Do not surport 8-bit image')
elif ips.dtype==np.uint16 and not '16-bit' in note:
return self.app.alert('Do not surport 16-bit uint image')
elif ips.dtype in [np.int32, np.int64] and not 'int' in note:
return self.app.alert('Do not surport int image')
elif ips.dtype in [np.float32, np.float64] and not 'float' in note:
return self.app.alert('Do not surport float image')
return True
def preview(self, ips, para):
process_one(self, ips, ips.snap, ips.img, para, None)
def load(self, ips):return True
def ok(self, ips, para=None, callafter=None):
if para == None:
para = self.para
if not 'not_slice' in self.note and ips.slices>1:
if para == None:para = {}
if para!=None and 'stack' in para:del para['stack']
# = WidgetsManager.getref('Macros Recorder')
if ips.slices==1 or 'not_slice' in self.note:
# process_one(self, ips, ips.snap, ips.img, para)
threading.Thread(target = process_one, args =
(self, ips, ips.snap, ips.img, para, callafter)).start()
# if win!=None: win.write('{}>{}'.format(self.title, para))
self.app.record_macros('{}>{}'.format(self.title, para))
elif ips.slices>1:
has, rst = 'stack' in para, None
if not has:
rst = self.app.yes_no('Run every slice in current stacks?')
if 'auto_snap' in self.note and self.modal:ips.reset()
if has and para['stack'] or rst == 'yes':
para['stack'] = True
#process_stack(self, ips, ips.snap, ips.imgs, para)
threading.Thread(target = process_stack, args =
(self, ips, ips.snap, ips.imgs, para, callafter)).start()
self.app.record_macros('{}>{}'.format(self.title, para))
elif has and not para['stack'] or rst == 'no':
para['stack'] = False
#process_one(self, ips, ips.snap, ips.img, para)
threading.Thread(target = process_one, args =
(self, ips, ips.snap, ips.img, para, callafter)).start()
self.app.record_macros('{}>{}'.format(self.title, para))
elif rst == 'cancel': pass
#ips.update()
def on_help(self):
self.app.show_md(self.__doc__ or 'No Document!', self.title)
def cancel(self, ips):
if 'auto_snap' in self.note:
ips.img[:] = ips.snap
ips.update()
def start(self, app, para=None, callafter=None):
self.app, self.ips = app, app.get_img()
if not self.check(self.ips):return
if not self.load(self.ips):return
if 'auto_snap' in self.note:self.ips.snapshot()
if para!=None:
self.ok(self.ips, para, callafter)
elif self.view==None:
if not self.__class__.show is Filter.show:
if self.show():
self.ok(self.ips, para, callafter)
else: self.ok(self.ips, para, callafter)
elif self.modal:
if self.show():
self.ok(self.ips, None, callafter)
else:self.cancel(self.ips)
else: self.show()
def __del__(self):
print('filter del')
| 40.71123 | 118 | 0.567188 |
c982a3d5f6ee85d7ca9294af3f91099f83ded903 | 1,954 | py | Python | tests/contracts/interop/test_binary.py | ixje/neo-mamba | 8b8a7bf2e600f89b91caff253f25c1c8afee6c0a | [
"MIT"
] | null | null | null | tests/contracts/interop/test_binary.py | ixje/neo-mamba | 8b8a7bf2e600f89b91caff253f25c1c8afee6c0a | [
"MIT"
] | null | null | null | tests/contracts/interop/test_binary.py | ixje/neo-mamba | 8b8a7bf2e600f89b91caff253f25c1c8afee6c0a | [
"MIT"
] | 1 | 2021-05-12T08:23:33.000Z | 2021-05-12T08:23:33.000Z | import unittest
from neo3 import vm
from .utils import test_engine
class BinaryInteropTestCase(unittest.TestCase):
def test_serialization(self):
engine = test_engine()
engine.push(vm.IntegerStackItem(100))
engine.invoke_syscall_by_name("System.Binary.Serialize")
item = engine.pop()
self.assertIsInstance(item, vm.ByteStringStackItem)
self.assertEqual(b'\x21\x01\x64', item.to_array())
# Create an item with data larger than engine.MAX_ITEM_SIZE
# this should fail in the BinarySerializer class
engine.push(vm.ByteStringStackItem(b'\x01' * (1024 * 1024 * 2)))
with self.assertRaises(ValueError) as context:
engine.invoke_syscall_by_name("System.Binary.Serialize")
self.assertEqual("Output length exceeds max size", str(context.exception))
def test_deserialization(self):
engine = test_engine()
original_item = vm.IntegerStackItem(100)
engine.push(original_item)
engine.invoke_syscall_by_name("System.Binary.Serialize")
engine.invoke_syscall_by_name("System.Binary.Deserialize")
item = engine.pop()
self.assertEqual(original_item, item)
engine.push(vm.ByteStringStackItem(b'\xfa\x01'))
with self.assertRaises(ValueError) as context:
engine.invoke_syscall_by_name("System.Binary.Deserialize")
self.assertEqual("Invalid format", str(context.exception))
def test_base64(self):
engine = test_engine()
original_item = vm.IntegerStackItem(100)
engine.push(original_item)
engine.invoke_syscall_by_name("System.Binary.Base64Encode")
item = engine.pop()
self.assertEqual('ZA==', item.to_array().decode())
engine.push(item)
engine.invoke_syscall_by_name("System.Binary.Base64Decode")
item = engine.pop()
self.assertEqual(original_item, vm.IntegerStackItem(item.to_array()))
| 40.708333 | 82 | 0.688843 |
6ab814b828aca9f4c81ee16d30f8613398ab4672 | 258 | py | Python | Python/problem0973.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | Python/problem0973.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | Python/problem0973.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:
for List in points:
List.append(List[0]**2 + List[1]**2)
points.sort(key=lambda x: x[2])
return [List[:2] for List in points[:K]]
| 36.857143 | 75 | 0.565891 |
2faa6c41c51583d9e01fb780bcfc3176e857571f | 2,092 | py | Python | newsTraining.py | iprashant2402/newsquoo-backend-python | fef21eefdb1160c3728067706798cc8020c08eb8 | [
"Apache-2.0"
] | null | null | null | newsTraining.py | iprashant2402/newsquoo-backend-python | fef21eefdb1160c3728067706798cc8020c08eb8 | [
"Apache-2.0"
] | null | null | null | newsTraining.py | iprashant2402/newsquoo-backend-python | fef21eefdb1160c3728067706798cc8020c08eb8 | [
"Apache-2.0"
] | null | null | null | import sys
import numpy as np
import pandas as pd
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.cluster import MiniBatchKMeans
import nltk
from nltk import pos_tag
from nltk.stem import PorterStemmer
from nltk import word_tokenize
import re
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
import json
X = pd.read_json("./dataset/news_data.json",orient="records")
X = X[pd.isna(X['title'])==False]
X = X[pd.isna(X['content'])==False]
stemmer = PorterStemmer()
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
progress = 0
def stem(x):
dirty = word_tokenize(x)
tokens = []
for word in dirty:
if word.strip('.') == '':
pass
elif re.search(r'\d{1,}', word):
pass
else:
tokens.append(word.strip('.'))
global start
global progress
tokens = pos_tag(tokens) #
progress += 1
stems = ' '.join(stemmer.stem(key.lower()) for key, value in tokens if value != 'NNP') #getting rid of proper nouns
# end = time.time()
# sys.stdout.write('\r {} percent, {} position, {} per second '.format(str(float(progress / len(articles))),
# str(progress), (1 / (end - start)))) #lets us see how much time is left
# start = time.time()
return stems
X['content'].dropna(inplace=True)
X['stems'] = X['content'].apply(lambda x: stem(x))
print(X.info())
text_content = X['stems']
vector = TfidfVectorizer(max_df=0.3,
min_df=8,
stop_words='english',
lowercase=True,
use_idf=True,
norm=u'l2',
smooth_idf=True
)
tfidf = vector.fit_transform(text_content)
pickle.dump(X, open('output/X', 'wb'))
pickle.dump(vector, open('output/vector', 'wb'))
pickle.dump(tfidf, open('output/tfidf', 'wb'))
updated_news_data = X.to_json("dataset/filtered_news_data.json", orient="records") | 30.318841 | 120 | 0.614723 |
6e911194b8ebce71e32f62a68bd4c2a3f652a9e3 | 1,270 | py | Python | plotly/validators/candlestick/increasing/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | 6 | 2019-05-03T02:12:04.000Z | 2020-03-01T06:33:21.000Z | plotly/validators/candlestick/increasing/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | null | null | null | plotly/validators/candlestick/increasing/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | 5 | 2019-05-18T16:50:11.000Z | 2021-07-06T21:14:36.000Z |
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name='line',
parent_name='candlestick.increasing',
**kwargs
):
super(LineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Line'),
data_docs=kwargs.pop(
'data_docs', """
color
Sets the color of line bounding the box(es).
width
Sets the width (in px) of line bounding the
box(es).
"""
),
**kwargs
)
import _plotly_utils.basevalidators
class FillcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='fillcolor',
parent_name='candlestick.increasing',
**kwargs
):
super(FillcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop('anim', True),
edit_type=kwargs.pop('edit_type', 'style'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 25.4 | 70 | 0.569291 |
431d08f7cce35b68a7023500e1daf247533a5693 | 1,094 | py | Python | no_agent2/policy_learner.py | songaal/rltrader | 4aac8085dda1a58fbf30a313f2a4608398c971a3 | [
"MIT"
] | 2 | 2020-06-13T07:18:10.000Z | 2020-11-03T03:46:40.000Z | no_agent2/policy_learner.py | songaal/rltrader | 4aac8085dda1a58fbf30a313f2a4608398c971a3 | [
"MIT"
] | null | null | null | no_agent2/policy_learner.py | songaal/rltrader | 4aac8085dda1a58fbf30a313f2a4608398c971a3 | [
"MIT"
] | 1 | 2020-05-16T08:41:29.000Z | 2020-05-16T08:41:29.000Z | import os
import locale
import logging
import numpy as np
from keras.datasets import mnist
from no_agent2.policy_network import PolicyNetwork
logger = logging.getLogger(__name__)
locale.setlocale(locale.LC_ALL, 'ko_KR.UTF-8')
class PolicyLearner:
def __init__(self, symbol, x_train, lr=0.01):
self.symbol = symbol
self.x_train = x_train # 학습 데이터
self.num_features = self.x_train.shape
self.policy_network = PolicyNetwork(input_dim=self.num_features, lr=lr)
def fit(self, x_train, y_train, x_test, y_test, num_epoches=1000, batch_size=10, model_path=None):
self.policy_network.fit(x_train=x_train, y_train=y_train,
epochs=num_epoches, batch_size=batch_size,
x_test=x_test, y_test=y_test,
model_path=model_path)
def trade(self, x, model_path=None):
if model_path is None:
return
self.policy_network.load_model(model_path=model_path)
return self.policy_network.predict(x)
| 33.151515 | 103 | 0.647166 |
87944611299ba09a1ec38453ec010641f3125d31 | 602 | py | Python | main.py | msidolphin/JobboleArticleSpider | e7d4ff4db03934baa65ebe90c5762ffd8c709f9d | [
"Apache-2.0"
] | null | null | null | main.py | msidolphin/JobboleArticleSpider | e7d4ff4db03934baa65ebe90c5762ffd8c709f9d | [
"Apache-2.0"
] | null | null | null | main.py | msidolphin/JobboleArticleSpider | e7d4ff4db03934baa65ebe90c5762ffd8c709f9d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = "msidolphin"
from scrapy.cmdline import execute
import sys
import os
'''
sys.path.append("...") : when we import a module (import xxx), the Python interpreter
by default searches the current directory, the installed built-in modules and the
third-party modules; the search paths are stored in sys.path.
Those paths are already set up by the environment; to add our own search directory
we can use the list's append() method. The path could be hard-coded, but that would
break as soon as the directory changes, so it is derived from __file__ below.
'''
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# print(os.path.dirname(os.path.abspath(__file__)))  # prints the absolute path of the directory that contains main.py, e.g. F:\pySpider\ArticleSpider
execute(["scrapy", "crawl", "jobbole"])  # equivalent to running `scrapy crawl jobbole` on the command line to start the spider
# this lets us run and debug the scrapy spider from inside PyCharm
| 31.684211 | 98 | 0.754153 |
c62211be52a52394be820e038b68f6f9e48d8f51 | 1,566 | py | Python | ipsumminer/node.py | zawoad/ipsum-miner | eb9b72ef7048c3fbf9f4a106578ee58312713c68 | [
"Apache-2.0"
] | 1 | 2017-03-08T21:20:16.000Z | 2017-03-08T21:20:16.000Z | ipsumminer/node.py | zawoad/ipsum-miner | eb9b72ef7048c3fbf9f4a106578ee58312713c68 | [
"Apache-2.0"
] | null | null | null | ipsumminer/node.py | zawoad/ipsum-miner | eb9b72ef7048c3fbf9f4a106578ee58312713c68 | [
"Apache-2.0"
] | null | null | null | import logging
import requests
import bs4 # we use bs4 to parse the HTML page
from minemeld.ft.basepoller import BasePollerFT
LOG = logging.getLogger(__name__)
class Miner(BasePollerFT):
def configure(self):
super(Miner, self).configure()
self.polling_timeout = self.config.get('polling_timeout', 3600)
self.verify_cert = self.config.get('verify_cert', False)
self.url = 'https://raw.githubusercontent.com/stamparm/ipsum/master/ipsum.txt'
def _build_iterator(self, item):
# builds the request and retrieves the page
rkwargs = dict(
stream=False,
verify=self.verify_cert,
timeout=self.polling_timeout
)
r = requests.get(
self.url,
**rkwargs
)
try:
r.raise_for_status()
except:
LOG.debug('%s - exception in request: %s %s',
self.name, r.status_code, r.content)
raise
# parse the page
con = r.text.decode('utf-8')
lines = con.split("\n")
result = []
for s in lines:
if s.startswith("#") == False:
result.append(s)
return result
def _process_item(self, item):
arr = item.split("\t")
if len(arr) < 1:
LOG.error('%s - no data-context-item-id attribute', self.name)
return []
indicator = arr[0]
value = {
'type': 'IPv4',
'confidence': 50
}
return [[indicator, value]]
| 25.258065 | 86 | 0.544061 |
a8b0d5d1bfbaf09ae893f501dd9093127864a3fa | 922 | py | Python | shops/cushine.py | ikp4success/shopasource | 9a9ed5c58a8b37b6ff169b45f7fdfcb44809fd88 | [
"Apache-2.0"
] | 3 | 2019-12-04T07:08:55.000Z | 2020-12-08T01:38:46.000Z | shops/cushine.py | ikp4success/shopasource | 9a9ed5c58a8b37b6ff169b45f7fdfcb44809fd88 | [
"Apache-2.0"
] | null | null | null | shops/cushine.py | ikp4success/shopasource | 9a9ed5c58a8b37b6ff169b45f7fdfcb44809fd88 | [
"Apache-2.0"
] | null | null | null | from shops.shop_base import ShopBase
class Cushine(ShopBase):
name = "CUSHINE"
_search_keyword = None
def parse_results(self, response):
json_data = self.safe_json(response.text)
t_data = self.safe_grab(json_data, ["response", "docs"], default=[])
for item in t_data:
title = self.safe_grab(item, ["name"])
image_url = self.safe_grab(item, ["image_varchar"])[0]
url = self.safe_grab(item, ["url"])
description = self.safe_grab(item, ["description"])
price = self.safe_grab(item, ["final_price"])
yield self.generate_result_meta(
shop_link=url,
image_url=image_url,
shop_name=self.name,
price=price,
title=title,
searched_keyword=self._search_keyword,
content_description=description,
)
| 34.148148 | 76 | 0.572668 |
d4d113b3fa96c52c9d17ef0d6445b00912580b01 | 22,144 | py | Python | share/smack/svcomp/utils.py | jjgarzella/smack | 1458fa20de42af69b40b09de00256c799574ca52 | [
"MIT"
] | null | null | null | share/smack/svcomp/utils.py | jjgarzella/smack | 1458fa20de42af69b40b09de00256c799574ca52 | [
"MIT"
] | null | null | null | share/smack/svcomp/utils.py | jjgarzella/smack | 1458fa20de42af69b40b09de00256c799574ca52 | [
"MIT"
] | null | null | null | import os
import re
import sys
import subprocess
import time
from shutil import copyfile
import smack.top
import filters
from toSVCOMPformat import smackJsonToXmlGraph
from random_testing import random_test
def svcomp_frontend(args):
"""Generate Boogie code from SVCOMP-style C-language source(s)."""
# enable static LLVM unroll pass
args.static_unroll = True
# disable dynamic execution
args.execute = False
if len(args.input_files) > 1:
raise RuntimeError("Expected a single SVCOMP input file.")
# check svcomp properties and set flags accordingly
svcomp_check_property(args)
# fix: disable float filter for memory safety benchmarks
if not args.memory_safety:
# test bv and executable benchmarks
file_type, executable = filters.svcomp_filter(args.input_files[0])
if file_type == 'bitvector':
args.bit_precise = True
args.bit_precise_pointers = True
if file_type == 'float' and not args.signed_integer_overflow:
#sys.exit(smack.top.results(args)['unknown'])
args.float = True
args.bit_precise = True
args.bit_precise_pointers = True
#args.verifier = 'boogie'
args.time_limit = 1000
args.unroll = 100
args.execute = executable
else:
with open(args.input_files[0], "r") as sf:
sc = sf.read()
if 'unsigned char b:2' in sc or "4294967294u" in sc:
args.bit_precise = True
#args.bit_precise_pointers = True
name, ext = os.path.splitext(os.path.basename(args.input_files[0]))
svcomp_process_file(args, name, ext)
args.clang_options += " -DSVCOMP"
args.clang_options += " -DAVOID_NAME_CONFLICTS"
args.clang_options += " -DCUSTOM_VERIFIER_ASSERT"
args.clang_options += " -DNO_FORALL"
args.clang_options += " -DDISABLE_PTHREAD_ASSERTS"
args.clang_options += " -include smack.h"
if os.path.splitext(args.input_files[0])[1] == ".i":
# Ensure clang runs the preprocessor, even with .i extension.
args.clang_options += " -x c"
smack.top.clang_frontend(args)
def svcomp_check_property(args):
# Check if property is vanilla reachability, and return unknown otherwise
if args.svcomp_property:
with open(args.svcomp_property, "r") as f:
prop = f.read()
if "valid-deref" in prop:
args.memory_safety = True
elif "overflow" in prop:
args.signed_integer_overflow = True
elif not "__VERIFIER_error" in prop:
sys.exit(smack.top.results(args)['unknown'])
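# For reference, the property files checked above typically contain SV-COMP
# formulas along these lines (a hedged sketch; the exact text is illustrative):
#   unreach-call:    CHECK( init(main()), LTL(G ! call(__VERIFIER_error())) )
#   no-overflow:     CHECK( init(main()), LTL(G ! overflow) )
#   valid-memsafety: CHECK( init(main()), LTL(G valid-free) )
#                    CHECK( init(main()), LTL(G valid-deref) )
#                    CHECK( init(main()), LTL(G valid-memtrack) )
# which is why this function greps for "valid-deref", "overflow" and "__VERIFIER_error".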
def svcomp_process_file(args, name, ext):
args.orig_files = list(args.input_files)
with open(args.input_files[0], 'r') as fi:
s = fi.read()
args.input_files[0] = smack.top.temporary_file(name, ext, args)
# replace exit definition with exit_
s = re.sub(r'void\s+exit\s*\(int s\)', r'void exit_(int s)', s)
if not ('direc_start' in s or 'just_echo' in s):
s = re.sub(r'argv\[i\]=malloc\(11\);\s+argv\[i\]\[10\]\s+=\s+0;\s+for\(int\s+j=0;\s+j<10;\s+\+\+j\)\s+argv\[i\]\[j\]=__VERIFIER_nondet_char\(\);', r'argv[i] = malloc(3);\n argv[i][0] = __VERIFIER_nondet_char();\n argv[i][1] = __VERIFIER_nondet_char();\n argv[i][2] = 0;', s)
s = re.sub(r'char\s+\*a\s+=\s+malloc\(11\);\s+a\[10\]\s+=\s+0;\s+for\(int\s+i=0;\s+i<10;\s+\+\+i\)\s+a\[i\]=__VERIFIER_nondet_char\(\);', r'char *a = malloc(3);\n a[0] = __VERIFIER_nondet_char();\n a[1] = __VERIFIER_nondet_char();\n a[2] = 0;', s)
s = re.sub(r'static\s+char\s+dir\[42\];\s+for\(int\s+i=0;\s+i<42;\s+\+\+i\)\s+dir\[i\]\s+=\s+__VERIFIER_nondet_char\(\);\s+dir\[41\]\s+=\s+\'\\0\';', r'static char dir[3];\n dir[0] = __VERIFIER_nondet_char();\n dir[1] = __VERIFIER_nondet_char();\n dir[2] = 0;', s)
s = re.sub(r'__VERIFIER_assume\(i < 16\);', r'__VERIFIER_assume(i >= 0 && i < 16);', s)
if args.memory_safety and not 'argv=malloc' in s:
s = re.sub(r'typedef long unsigned int size_t', r'typedef unsigned int size_t', s)
elif args.memory_safety and re.search(r'getopt32\([^,)]+,[^,)]+,[^.)]+\);', s):
if not args.quiet:
print("Stumbled upon a benchmark that requires precise handling of vararg\n")
while (True):
pass
elif args.memory_safety and ('count is too big' in s or 'pdRfilsLHarPv' in s or 'rnugG' in s):
if not args.quiet:
print("Stumbled upon a benchmark that contains undefined behavior\n")
while (True):
pass
if args.float:
if re.search("fesetround|fegetround|InvSqrt|ccccdp-1",s):
sys.exit(smack.top.results(args)['unknown'])
if 'argv=malloc' in s:
# args.bit_precise = True
if args.signed_integer_overflow and ('unsigned int d = (unsigned int)((signed int)(unsigned char)((signed int)*q | (signed int)(char)32) - 48);' in s or 'bb_ascii_isalnum' in s or 'ptm=localtime' in s or '0123456789.' in s):
args.bit_precise = True
args.bit_precise_pointers = True
length = len(s.split('\n'))
if length < 60:
# replace all occurrences of 100000 with 10 and 15000 with 5
# Only target at small examples
s = re.sub(r'100000', r'10', s)
s = re.sub(r'15000', r'5', s)
s = re.sub(r'i<=10000', r'i<=1', s)
elif length < 710 and 'dll_create_master' in s:
args.no_memory_splitting = True
#Remove any preprocessed declarations of pthread types
#Also, if file contains 'pthread', set pthread mode
s,args.pthread = filters.scrub_pthreads(s)
if args.pthread:
s = "#include <pthread.h>\n" + s
with open(args.input_files[0], 'w') as fo:
fo.write(s)
def is_crappy_driver_benchmark(args, bpl):
if ("205_9a_array_safes_linux-3.16-rc1.tar.xz-205_9a-drivers--net--usb--rtl8150.ko-entry_point_true-unreach-call" in bpl or
"32_7a_cilled_true-unreach-call_linux-3.8-rc1-32_7a-drivers--gpu--drm--ttm--ttm.ko-ldv_main5_sequence_infinite_withcheck_stateful" in bpl or
"32_7a_cilled_true-unreach-call_linux-3.8-rc1-32_7a-drivers--media--dvb-core--dvb-core.ko-ldv_main5_sequence_infinite_withcheck_stateful" in bpl or
"32_7a_cilled_true-unreach-call_linux-3.8-rc1-32_7a-sound--core--seq--snd-seq.ko-ldv_main2_sequence_infinite_withcheck_stateful" in bpl or
"43_2a_bitvector_linux-3.16-rc1.tar.xz-43_2a-drivers--net--xen-netfront.ko-entry_point_true-unreach-call" in bpl or
"linux-3.14__complex_emg__linux-drivers-clk1__drivers-net-ethernet-ethoc_true-unreach-call" in bpl or
"linux-3.14__linux-usb-dev__drivers-media-usb-hdpvr-hdpvr_true-unreach-call" in bpl or
"linux-4.2-rc1.tar.xz-32_7a-drivers--net--usb--r8152.ko-entry_point_true-unreach-call" in bpl or
"linux-3.14__complex_emg__linux-kernel-locking-spinlock__drivers-net-ethernet-smsc-smsc911x_true-unreach-call" in bpl or
"linux-3.14__complex_emg__linux-kernel-locking-spinlock__drivers-net-wan-lmc-lmc_true-unreach-call" in bpl or
"linux-4.2-rc1.tar.xz-43_2a-drivers--net--ppp--ppp_generic.ko-entry_point_true-unreach-call" in bpl):
if not args.quiet:
print("Stumbled upon a crappy device driver benchmark\n")
while (True):
pass
def force_timeout():
sys.stdout.flush()
time.sleep(1000)
def verify_bpl_svcomp(args):
"""Verify the Boogie source file using SVCOMP-tuned heuristics."""
heurTrace = "\n\nHeuristics Info:\n"
if args.memory_safety:
if not (args.only_check_valid_deref or args.only_check_valid_free or args.only_check_memleak):
heurTrace = "engage valid deference checks.\n"
args.only_check_valid_deref = True
args.prop_to_check = 'valid-deref'
args.bpl_with_all_props = smack.top.temporary_file(os.path.splitext(os.path.basename(args.bpl_file))[0], '.bpl', args)
copyfile(args.bpl_file, args.bpl_with_all_props)
smack.top.property_selection(args)
elif args.only_check_valid_deref:
heurTrace = "engage valid free checks.\n"
args.only_check_valid_free = True
args.prop_to_check = 'valid-free'
args.only_check_valid_deref = False
args.bpl_file = smack.top.temporary_file(os.path.splitext(os.path.basename(args.bpl_file))[0], '.bpl', args)
copyfile(args.bpl_with_all_props, args.bpl_file)
smack.top.property_selection(args)
elif args.only_check_valid_free:
heurTrace = "engage memleak checks.\n"
args.only_check_memleak = True
args.prop_to_check = 'memleak'
args.only_check_valid_free = False
args.bpl_file = smack.top.temporary_file(os.path.splitext(os.path.basename(args.bpl_file))[0], '.bpl', args)
copyfile(args.bpl_with_all_props, args.bpl_file)
smack.top.property_selection(args)
# invoke boogie for floats
# I have to copy/paste part of verify_bpl
if args.float:
args.verifier = 'boogie'
boogie_command = ["boogie"]
boogie_command += [args.bpl_file]
boogie_command += ["/nologo", "/noinfer", "/doModSetAnalysis"]
boogie_command += ["/timeLimit:%s" % args.time_limit]
boogie_command += ["/errorLimit:%s" % args.max_violations]
boogie_command += ["/loopUnroll:%d" % args.unroll]
if args.bit_precise:
x = "bopt:" if args.verifier != 'boogie' else ""
boogie_command += ["/%sproverOpt:OPTIMIZE_FOR_BV=true" % x]
boogie_command += ["/%sz3opt:smt.relevancy=0" % x]
boogie_command += ["/%sz3opt:smt.bv.enable_int2bv=true" % x]
boogie_command += ["/%sboolControlVC" % x]
if args.verifier_options:
boogie_command += args.verifier_options.split()
boogie_output = smack.top.try_command(boogie_command, timeout=args.time_limit)
boogie_result = smack.top.verification_result(boogie_output)
write_error_file(args, boogie_result, boogie_output)
sys.exit(smack.top.results(args)[boogie_result])
# If pthreads found, perform lock set analysis
if args.pthread:
lockpwn_command = ["lockpwn"]
lockpwn_command += [args.bpl_file]
lockpwn_command += ["/corral"]
args.bpl_file = smack.top.temporary_file(os.path.splitext(os.path.basename(args.bpl_file))[0], '.bpl', args)
lockpwn_command += ["/o:%s" % args.bpl_file]
lockpwn_output = smack.top.try_command(lockpwn_command);
corral_command = ["corral"]
corral_command += [args.bpl_file]
corral_command += ["/tryCTrace", "/noTraceOnDisk", "/printDataValues:1"]
corral_command += ["/useProverEvaluate", "/cex:1"]
with open(args.bpl_file, "r") as f:
bpl = f.read()
is_crappy_driver_benchmark(args, bpl)
if args.pthread:
if "fib_bench" in bpl or "27_Boop_simple_vf_false-unreach-call" in bpl:
heurTrace += "Increasing context switch bound for certain pthread benchmarks.\n"
corral_command += ["/k:30"]
else:
corral_command += ["/k:3"]
if not "qrcu_reader2" in bpl and not "__VERIFIER_atomic_take_write_lock" in bpl and not "fib_bench" in bpl:
corral_command += ["/cooperative"]
else:
corral_command += ["/k:1"]
if not (args.memory_safety or args.bit_precise):
corral_command += ["/di"]
# we are not modeling strcpy
if args.pthread and "strcpy" in bpl:
heurTrace += "We are not modeling strcpy - aborting\n"
if not args.quiet:
print(heurTrace + "\n")
sys.exit(smack.top.results(args)['unknown'])
# Setting good loop unroll bound based on benchmark class
loopUnrollBar = 8
staticLoopBound = 65536
if not args.bit_precise and "ssl3_accept" in bpl and "s__s3__tmp__new_cipher__algorithms" in bpl:
heurTrace += "ControlFlow benchmark detected. Setting loop unroll bar to 23.\n"
loopUnrollBar = 23
elif "s3_srvr.blast.10_false-unreach-call" in bpl or "s3_srvr.blast.15_false-unreach-call" in bpl:
heurTrace += "ControlFlow benchmark detected. Setting loop unroll bar to 23.\n"
loopUnrollBar = 23
elif "NonTerminationSimple4_false-no-overflow" in bpl:
heurTrace += "Overflow benchmark detected. Setting loop unroll bar to 1024.\n"
loopUnrollBar = 1024
elif " node3" in bpl:
heurTrace += "Sequentialized benchmark detected. Setting loop unroll bar to 100.\n"
loopUnrollBar = 100
elif "calculate_output" in bpl or "psyco" in bpl:
heurTrace += "ECA benchmark detected. Setting loop unroll bar to 15.\n"
loopUnrollBar = 15
elif "ldv" in bpl:
if "linux-4.2-rc1.tar.xz-08_1a-drivers--staging--lustre--lustre--llite--llite_lloop.ko-entry_point" in bpl or "linux-3.14__complex_emg__linux-usb-dev__drivers-media-usb-hdpvr-hdpvr" in bpl:
heurTrace += "Special LDV benchmark detected. Setting loop unroll bar to 32.\n"
loopUnrollBar = 32
else:
heurTrace += "LDV benchmark detected. Setting loop unroll bar to 13.\n"
loopUnrollBar = 13
staticLoopBound = 64
elif "standard_strcpy_false-valid-deref_ground_true-termination" in bpl or "960521-1_false-valid-free" in bpl or "960521-1_false-valid-deref" in bpl or "lockfree-3.3" in bpl or "list-ext_false-unreach-call_false-valid-deref" in bpl:
heurTrace += "Memory safety benchmark detected. Setting loop unroll bar to 129.\n"
loopUnrollBar = 129
elif "is_relaxed_prefix" in bpl:
heurTrace += "Benchmark relax_* detected. Setting loop unroll bar to 15.\n"
loopUnrollBar = 15
elif "id_o1000_false-unreach-call" in bpl:
heurTrace += "Recursive benchmark detected. Setting loop unroll bar to 1024.\n"
loopUnrollBar = 1024
elif "n.c24" in bpl or "array_false-unreach-call3" in bpl:
heurTrace += "Loops benchmark detected. Setting loop unroll bar to 1024.\n"
loopUnrollBar = 1024
elif "printf_false-unreach-call" in bpl or "echo_true-no-overflow" in bpl:
heurTrace += "BusyBox benchmark detected. Setting loop unroll bar to 11.\n"
loopUnrollBar = 11
elif args.memory_safety and "__main($i0" in bpl:
heurTrace += "BusyBox memory safety benchmark detected. Setting loop unroll bar to 4.\n"
loopUnrollBar = 4
elif args.signed_integer_overflow and "__main($i0" in bpl:
heurTrace += "BusyBox overflows benchmark detected. Setting loop unroll bar to 4.\n"
loopUnrollBar = 4
elif args.signed_integer_overflow and ("jain" in bpl or "TerminatorRec02" in bpl or "NonTerminationSimple" in bpl):
heurTrace += "Infinite loop in overflow benchmark. Setting loop unroll bar to INT_MAX.\n"
loopUnrollBar = 2**31 - 1
if not "forall" in bpl:
heurTrace += "No quantifiers detected. Setting z3 relevancy to 0.\n"
corral_command += ["/bopt:z3opt:smt.relevancy=0"]
if args.bit_precise:
heurTrace += "--bit-precise flag passed - enabling bit vectors mode.\n"
corral_command += ["/bopt:proverOpt:OPTIMIZE_FOR_BV=true"]
corral_command += ["/bopt:boolControlVC"]
if not args.bit_precise_pointers:
corral_command += ["/bopt:z3opt:smt.bv.enable_int2bv=true"]
if args.memory_safety:
if args.prop_to_check == 'valid-deref':
if "memleaks_test12_false-valid-free" in bpl:
time_limit = 10
else:
time_limit = 750
elif args.prop_to_check == 'valid-free':
time_limit = 80
elif args.prop_to_check == 'memleak':
time_limit = 50
else:
time_limit = 880
command = list(corral_command)
command += ["/timeLimit:%s" % time_limit]
command += ["/v:1"]
command += ["/maxStaticLoopBound:%d" % staticLoopBound]
command += ["/recursionBound:65536"]
command += ["/irreducibleLoopUnroll:2"]
command += ["/trackAllVars"]
verifier_output = smack.top.try_command(command, timeout=time_limit)
result = smack.top.verification_result(verifier_output)
if result == 'error' or result == 'invalid-deref' or result == 'invalid-free' or result == 'invalid-memtrack' or result == 'overflow': #normal inlining
heurTrace += "Found a bug during normal inlining.\n"
if not args.quiet:
error = smack.top.error_trace(verifier_output, args)
      print(error)
if args.memory_safety:
heurTrace += (args.prop_to_check + "has errors\n")
if args.prop_to_check == 'valid-free':
if args.valid_deref_check_result != 'verified':
force_timeout()
elif args.prop_to_check == 'memleak':
if args.valid_free_check_result == 'timeout':
force_timeout()
elif result == 'timeout': #normal inlining
heurTrace += "Timed out during normal inlining.\n"
heurTrace += "Determining result based on how far we unrolled.\n"
# If we managed to unroll more than loopUnrollBar times, then return verified
# First remove exhausted loop bounds generated during max static loop bound computation
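      # Illustrative sketch (an assumed example, not output from a real run): a Corral
      # line such as
      #     Exhausted recursion bound of 12
      # is matched by the regex below, so unrollMax would become 12; only the largest
      # such bound seen in the output is kept.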
unrollMax = 0
if 'Verifying program while tracking' in verifier_output:
verifier_output = re.sub(re.compile('.*Verifying program while tracking', re.DOTALL),
'Verifying program while tracking', verifier_output)
it = re.finditer(r'Exhausted recursion bound of ([1-9]\d*)', verifier_output)
for match in it:
if int(match.group(1)) > unrollMax:
unrollMax = int(match.group(1))
else:
heurTrace += "Corral didn't even start verification.\n"
if unrollMax >= loopUnrollBar:
heurTrace += "Unrolling made it to a recursion bound of "
heurTrace += str(unrollMax) + ".\n"
heurTrace += "Reporting benchmark as 'verified'.\n"
if args.execute and not args.pthread:
heurTrace += "Hold on, let's see the execution result.\n"
execution_result = run_binary(args)
heurTrace += "Excecution result is " + execution_result + '\n'
if execution_result != 'true':
heurTrace += "Oops, execution result says {0}.\n".format(execution_result)
if not args.quiet:
print(heurTrace + "\n")
sys.exit(smack.top.results(args)['unknown'])
random_test_result = random_test(args, result)
if random_test_result == 'false' or random_test_result == 'unknown':
heurTrace += "Oops, random testing says {0}.\n".format(random_test_result)
if not args.quiet:
print(heurTrace + "\n")
sys.exit(smack.top.results(args)['unknown'])
if not args.quiet:
print(heurTrace + "\n")
if args.memory_safety:
heurTrace += (args.prop_to_check + "is verified\n")
if args.prop_to_check == 'valid-deref':
args.valid_deref_check_result = 'verified'
elif args.prop_to_check == 'valid-free':
args.valid_free_check_result = 'verified'
elif args.prop_to_check == 'memleak':
if args.valid_deref_check_result == 'timeout':
force_timeout()
else:
sys.exit(smack.top.results(args)[args.valid_deref_check_result])
verify_bpl_svcomp(args)
else:
write_error_file(args, 'verified', verifier_output)
sys.exit(smack.top.results(args)['verified'])
else:
heurTrace += "Only unrolled " + str(unrollMax) + " times.\n"
heurTrace += "Insufficient unrolls to consider 'verified'. "
heurTrace += "Reporting 'timeout'.\n"
if not args.quiet:
print(heurTrace + "\n")
sys.stdout.flush()
if args.memory_safety:
heurTrace += (args.prop_to_check + " times out\n")
if args.prop_to_check == 'valid-deref':
args.valid_deref_check_result = 'timeout'
force_timeout()
elif args.prop_to_check == 'valid-free':
args.valid_free_check_result = 'timeout'
elif args.prop_to_check == 'memleak':
if args.valid_deref_check_result == 'timeout':
force_timeout()
else:
sys.exit(smack.top.results(args)[args.valid_deref_check_result])
verify_bpl_svcomp(args)
else:
# Sleep for 1000 seconds, so svcomp shows timeout instead of unknown
time.sleep(1000)
elif result == 'verified': #normal inlining
heurTrace += "Normal inlining terminated and found no bugs.\n"
else: #normal inlining
heurTrace += "Normal inlining returned 'unknown'. See errors above.\n"
if not args.quiet:
print(heurTrace + "\n")
if args.memory_safety and result == 'verified':
heurTrace += (args.prop_to_check + " is verified\n")
if args.prop_to_check == 'valid-deref':
args.valid_deref_check_result = 'verified'
elif args.prop_to_check == 'valid-free':
args.valid_free_check_result = 'verified'
elif args.prop_to_check == 'memleak':
if args.valid_deref_check_result == 'timeout':
force_timeout()
else:
sys.exit(smack.top.results(args)[args.valid_deref_check_result])
verify_bpl_svcomp(args)
else:
write_error_file(args, result, verifier_output)
sys.exit(smack.top.results(args)[result])
def write_error_file(args, status, verifier_output):
return
if args.memory_safety or status == 'timeout' or status == 'unknown':
return
hasBug = (status != 'verified')
#if not hasBug:
# return
if args.error_file:
error = None
if args.language == 'svcomp':
error = smackJsonToXmlGraph(smack.top.smackdOutput(verifier_output), args, hasBug)
elif hasBug:
error = smack.top.error_trace(verifier_output, args)
if error is not None:
with open(args.error_file, 'w') as f:
f.write(error)
def run_binary(args):
#process the file to make it runnable
with open(args.input_files[0], 'r') as fi:
s = fi.read()
s = re.sub(r'(extern )?void __VERIFIER_error()', '//', s)
s = re.sub(r'__VERIFIER_error\(\)', 'assert(0)', s)
s = '#include<assert.h>\n' + s
name = os.path.splitext(os.path.basename(args.input_files[0]))[0]
tmp1 = smack.top.temporary_file(name, '.c', args)
with open(tmp1, 'w') as fo:
fo.write(s)
tmp2 = smack.top.temporary_file(name, '.bin', args)
tmp2 = tmp2.split('/')[-1]
#compile and run
cmd = ['clang', tmp1, '-o', tmp2]
#cmd += args.clang_options.split()
#if '-m32' in args.clang_options.split():
#cmd += ['-m32']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
rc = proc.returncode
if rc:
    print('Compiling error')
    print(err)
return 'unknown'
else:
cmd = [r'./' + tmp2]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
rc = proc.returncode
if rc:
if re.search(r'Assertion.*failed', err):
return 'false'
else:
        print('Execution error')
        print(err)
return 'unknown'
else:
return 'true'
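# Illustrative sketch of the source rewrite performed by run_binary() above
# (the C snippet is made up for illustration):
#
#   before:  extern void __VERIFIER_error(); ... __VERIFIER_error();
#   after:   #include<assert.h> is prepended, the declaration is commented out,
#            and each call becomes assert(0), so a failing assertion at runtime
#            indicates that the error location is reachable.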
| 43.849505 | 289 | 0.678739 |
83f461707c32cbd3add296d28b17099465852521 | 31,000 | py | Python | cadee/dyn/trajectory.py | kamerlinlab/cadee | 8fa34fc4f7fc496c8843e9380075ae11fca7aaa7 | [
"MIT"
] | 10 | 2017-01-11T09:21:27.000Z | 2021-06-27T03:56:15.000Z | cadee/dyn/trajectory.py | kamerlinlab/cadee | 8fa34fc4f7fc496c8843e9380075ae11fca7aaa7 | [
"MIT"
] | 2 | 2017-07-18T06:54:17.000Z | 2020-08-25T14:03:14.000Z | cadee/dyn/trajectory.py | kamerlinlab/cadee | 8fa34fc4f7fc496c8843e9380075ae11fca7aaa7 | [
"MIT"
] | 3 | 2017-03-15T12:18:13.000Z | 2021-02-28T05:09:36.000Z | #!/usr/bin/env python
"""
Module calling Q, and analysing logfile / Simulation
Author: {0} ({1})
This module is part of CADEE, the framework for
Computer-Aided Directed Evolution of Enzymes.
"""
from __future__ import print_function
from filecmp import cmp as comparefiles
from platform import node as hostname
import gzip
import os
import subprocess
import shutil
import time
import analysis
import tools
from tools import File
__author__ = "Beat Amrein"
__email__ = "beat.amrein@gmail.com"
logger = tools.getLogger('dyn.traj')
# This class is running a simulation
# It's essentially my Python implementation of submit.sh
# RELEVANT FOR PYTHON - MPI Interaction:
# http://stackoverflow.com/questions/10590064/interact-with-a-mpi-binary-via-a-non-mpi-python-script
# TODO 1: Custom Exceptions / Handling
# 2: if hot-atoms in first 4 steps, restart with smaller stepsize
# 3: automatic mapping
# 4: compress/uncompress pdbfile
# 5: compress/uncompress topology
# 6: compress/uncompress input files
# 7: fastforward md.QdynPackage, when loaded
# 8: mutation starting from just a sequence
# 9: Add flag: has_failed use instead of rising exception
#
# User-Defined Constants
DONT_LOG_LINES_WITH = ["--- Nonbonded pair list generation -----",
"---------- Timing ----------",
"Seconds per step (wall-clock):"]
NLC = '\n'
# CONSTANTS
NORMALTERM = "terminated normally"
NAN_INDICATOR = 'SUM NaN NaN NaN'
SHAKE_TERM = "Terminating due to shake failure"
WARNING_HOT_ATOM = ">>> WARNING: hot atom, i ="
ERR_LOG_TOO_SHORT = 1
ERR_ABNORMAL_TERM = 2
ERR_LOG_INEXISTENT = 4
ERR_SHAKE = 8
ERR_NAN = 16
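# The error codes above are distinct powers of two, so a combined status stays
# unambiguous. A minimal sketch (an assumption for illustration, not part of the
# original module):
#
#     status = ERR_LOG_TOO_SHORT | ERR_SHAKE   # == 9
#     if status & ERR_SHAKE:
#         print("shake failure detected")
#
# WorkUnit.run() below simply adds self.status and the Qdyn exit code, which is
# only meaningful because zero means "no error" for both.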
class WorkUnit(object):
""" container for 1 qdyn-simuluation """
# class WorkUnitException(Exception):
# pass
# class NaNException(WorkUnitException):
# pass
# class ShakeFailure(WorkUnitException):
# pass
# class AbnormalTermination(WorkUnitException):
# pass
# class NoLogFile(WorkUnitException):
# pass
# class TooShortLogFile(WorkUnitException):
# pass
DEBUG = False
def __init__(self, unitnumber, inputfile, topology,
pdbfile=None, fepfile=None, restraintfile=None,
restartfile=None):
if isinstance(inputfile, str):
inputfile = [inputfile, '']
elif isinstance(inputfile, list):
pass
else:
            raise Exception('WTF')
self.unitnumber = unitnumber
# error Msg
self.errMsg = ''
# shared inputs
self.topology = topology
self.pdbfile = pdbfile # opt
self.fepfile = fepfile # opt
# inputs
self.inputfile = inputfile
logger.debug('Input file: %s', self.inputfile[0])
self.restartfile = restartfile # opt
self.restraintfile = restraintfile # opt
# outputs
self.logfile = None
self.velocityfile = None # opt
self.dcdfile = None # opt
self.energyfile = None # opt
# stats
self.time = 0
self.status = None
self.q_exitcode = None
self._parse_inputfile()
if self.logfile is None or self.logfile[1] == '':
log = os.path.splitext(self.inputfile[0])[0]+".log"
loggz = os.path.splitext(self.inputfile[0])[0]+".log.gz"
if os.path.exists(log) and os.path.exists(loggz):
                # this should not happen, so we log a warning and
                # then delete the old log
logger.warning('deleting log, keeping log.gz')
os.remove(log)
fname = loggz
elif os.path.exists(loggz):
fname = loggz
else:
fname = log
self.logfile = [fname, '']
if os.path.exists(fname):
self.checklogfile()
if self.status != 0:
logger.warning('A log file exists BUT with status: %s !', self.status)
if self.topology is None:
logger.info(self.inputfile[0])
            raise Exception('topo')
        if self.inputfile is None:
            raise Exception('inp')
        if self.logfile is None:
            raise Exception('log')
def stats(self, obj):
""" return stats on fileobj (eg bytes, or empty) """
if obj is None:
return ''
elif isinstance(obj, list) and len(obj) == 2:
if obj[1] == '':
return str(obj[0])+': is empty'
else:
return str(obj[0])+':'+str(len(obj[1]))+'chars'
elif isinstance(obj, str):
return str(obj)+": is empty"
else:
logger.info(len(obj))
raise Exception('weirdo')
def __repr__(self):
""" return str of self """
out = ''
for obj in [self.topology, self.pdbfile, self.fepfile,
self.inputfile, self.restartfile,
self.restraintfile, self.logfile, self.velocityfile,
self.dcdfile, self.energyfile]:
            out += self.stats(obj) + NLC
out += 'status:' + str(self.status)
out += 'exitcode:' + str(self.q_exitcode)
return out
def _parse_inputfile(self):
''' Parse input file and populate self with filenames '''
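        # A hypothetical [files] section this parser expects (the file names are
        # made up for illustration):
        #
        #     [files]
        #     topology     protein.top
        #     fep          mutation.fep
        #     restart      eq5.re
        #     restraint    eq5.rest.re
        #     final        dyn1.re
        #     trajectory   dyn1.dcd
        #     energy       dyn1.en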
files_section = False
logger.debug(os.getcwd())
if self.inputfile[1] is None or self.inputfile[1] == '':
with open(self.inputfile[0], 'r') as f:
self.inputfile[1] = f.read()
for line in self.inputfile[1].split(NLC):
if line.strip() == '':
continue
# kill comments:
line = line.replace('#', '!').split('!')[0].strip()
if files_section:
if '[' in line.strip()[0]:
break
else:
if len(line.split()) != 2:
continue
ftype, fname = line.split()
ftype = ftype.lower().strip()
if ftype == 'topology':
if self.topology is None:
self.topology = [fname, '']
elif ftype == 'fep':
if self.fepfile is None:
self.fepfile = [fname, '']
elif ftype == 'restart':
if self.restartfile is None:
self.restartfile = [fname, '']
elif ftype == 'restraint':
if self.restraintfile is None:
if os.path.isfile(fname):
self.restraintfile = [fname, '']
else:
if fname == self.restartfile[0] + "st.re":
# restart + 'st.re' == restraint
msg = 'RE-use restart as restraint:'
msg += '-----> %s'
msg += self.restartfile[0]
logger.debug(msg)
shutil.copy(self.restartfile[0], fname)
self.restraintfile = [fname, '']
else:
# search harddisk for similar restraint
restartfile = fname[:-5]
msg = 'Try: use restart as restraint:'
msg += '-----> %s'
msg += restartfile
logger.debug(msg)
if os.path.isfile(restartfile):
shutil.copy(restartfile, fname)
else:
                                        msg = "can't find restraint: " + fname
                                        logger.warning(msg)
                                        # TODO: more heuristics
                                        raise Exception(msg)
elif ftype == 'final' and self.velocityfile is None:
self.velocityfile = [fname, '']
elif ftype == 'trajectory' and self.dcdfile is None:
self.dcdfile = [fname, '']
elif ftype == 'energy' and self.energyfile is None:
self.energyfile = [fname, '']
else:
                        logger.warning('unknown key in [files] section: %s', ftype)
                        raise Exception('unknown key in [files] section')
if line.lower() == '[files]':
files_section = True
if not files_section:
logger.warning('Fatal: no files section in input file %s',
self.inputfile)
            raise Exception('Fatal: no files section in input file')
def run(self, exe):
""" run simulation with executable exe """
if os.path.isfile(self.logfile[0]):
self.status = self.checklogfile()
if self.status == 0:
return 0
ifname = self.inputfile[0]
ofname = self.logfile[0]
if len(ifname) == 1 or len(ofname) == 1:
            raise Exception('WTF')
self._deploy()
# run q
start = time.time()
cmd = exe + " " + ifname
logger.info("%s", ifname)
logger.debug("%s %s", hostname(), cmd)
try:
subprocess.check_call([exe, ifname], stdout=open(ofname, 'w'))
self.q_exitcode = 0
except subprocess.CalledProcessError as exitstatus:
            logger.warning('Detected a non-zero exit status: %s', exitstatus)
self.q_exitcode = exitstatus.returncode
# check logfile
self.checklogfile()
self.time = time.time() - start
if self.status == 0 and self.q_exitcode == 0:
return 0
else:
            logger.warning('Detected status %s and an exitcode %s',
                           self.status, self.q_exitcode)
return self.status + self.q_exitcode
def checklogfile(self):
""" Check Logfile """
logger.debug("Checking log file ...")
# TODO: do something about hot atoms.
# eg a) run longer
# b) never read the restraint of this run
# WARN_HOT_ATOM = 0
self.hot_atoms = 0
log = []
logfile = self.logfile[0]
if os.path.isfile(self.logfile[0]+".gz"):
os.remove(self.logfile[0])
self.logfile[0] = self.logfile[0]+".gz"
try:
if logfile[-3:] == ".gz":
for line in gzip.open(logfile):
log.append(line)
compress = False
else:
with open(logfile) as fil:
for line in fil:
if line in DONT_LOG_LINES_WITH:
continue
log.append(line)
compress = True
except IOError:
err = "Could not open log file!"
logger.warning(err)
self.errMsg += err
self.status = ERR_LOG_INEXISTENT
return ERR_LOG_INEXISTENT
if len(log) > 5:
l5l = 'Last 5 lines of log file:'
self.errMsg += NLC + l5l + NLC
for logline in log[-5:]:
self.errMsg += len('Last 5 Lines') * ' ' + ">" + logline
self.errMsg += "/" + 'Last 5 Lines' + NLC + NLC
if len(log) < 100:
err = 'The log file is too short (less than 100 lines)!'
logger.warning(err)
self.errMsg += err
self.status = ERR_LOG_TOO_SHORT
for line in log:
logger.info(line.strip())
return ERR_LOG_TOO_SHORT
else:
# TODO: check if we have insane high energies
for line in log[-50:]:
allok = 0
if NORMALTERM in line:
allok = 1
break
elif NAN_INDICATOR in line:
err = "Found NaN Error: '" + NAN_INDICATOR + "'"
logger.warning(err)
self.errMsg += err
self.status = ERR_NAN
return ERR_NAN
elif SHAKE_TERM in line:
err = "Found Shake Error: '" + SHAKE_TERM + "'"
logger.warning(err)
self.errMsg += err
self.status = ERR_SHAKE
return ERR_SHAKE
elif WARNING_HOT_ATOM in line:
if self.hot_atoms < 1:
err = "Found HOT ATOM'"
logger.warning(err)
self.errMsg += err
self.hot_atoms += 1
if allok != 1:
err = 'The log file is missing ' + str(NORMALTERM) + ' string!'
err += ' UNKNOWN ERROR '
self.errMsg += err
logger.warning(err)
self.status = ERR_ABNORMAL_TERM
return ERR_ABNORMAL_TERM
if compress:
# re-writing compressed logfile without rubbish lines
gzip.open(logfile+".gz", 'wb').writelines(log)
self.logfile[0] = logfile+".gz"
os.remove(logfile)
        # search and remove duplicate *rest.re files
if self.restraintfile is not None and self.restartfile is not None:
if len(self.restraintfile) == 2 and len(self.restartfile) == 2:
restre = self.restraintfile[0]
restart = self.restartfile[0]
if (len(restre) > 8 and len(restart) > 3 and
restre[-8:] == ".rest.re" and restart[-3:] == ".re"):
if os.path.isfile(restre) and os.path.isfile(restart):
if comparefiles(restart, restre, False):
os.remove(restre)
# compress energyfile
if self.energyfile is not None and len(self.energyfile) == 2:
energy = self.energyfile[0]
if os.path.isfile(energy) and energy[-3:] != ".gz":
engz = energy + ".gz"
with open(energy, 'rb') as fil_in:
with gzip.open(engz, 'wb') as fil_out:
shutil.copyfileobj(fil_in, fil_out)
os.remove(energy)
self.status = 0
return 0
def _deploy(self):
""" serialize data from memory-oject to disk (deploy to disk) """
for data in (self.topology, self.pdbfile, self.fepfile,
self.inputfile, self.restraintfile, self.restartfile,
self.logfile, self.dcdfile,
self.energyfile, self.velocityfile):
if data is None:
continue
if isinstance(data, str):
continue
fname, data = data
if data.strip() == '':
if WorkUnit.DEBUG:
logger.debug('Wont write empty file %s !', fname)
continue
# if os.path.isfile(fname):
# Msg.log('Wont overwrite existing file', fname)
# continue
if isinstance(fname, str) and isinstance(data, str):
with open(fname, 'wb') as fil:
fil.write(data)
if WorkUnit.DEBUG:
logger.debug('Serialized: %s .', fname)
else:
logger.warning('This might be a problem here: type(fname) == %s %s %s !',
type(fname), 'and type(data) ==', type(data))
                raise Exception('Expected: str() and str()')
class QdynPackage(object):
def map_and_analyze(self, eqfil=None):
if self.mapped is None:
logger.debug('Mapping disabled.')
elif self.mapped is True:
logger.debug('is already mapped (skipping)!')
return True
elif self.mapped is False:
with tools.cd(self.path):
if eqfil is None:
self.mapped = analysis.main(self.map_settings, eqfil)
else:
analysis.main(self.map_settings, eqfil)
else:
            raise Exception('WTF')
def parse_file(self, fname):
"""
Read fname.
If fname is None:
return None
if fname is str:
read the file and store in memory
if fname is [path, data]
return [basename(path), data]
"""
if fname is None:
return None
if isinstance(fname, str):
if os.path.isfile(fname):
with open(fname, 'r') as fil:
data = fil.read()
fname = os.path.basename(fname)
return [fname, data]
else:
logger.warning('Could not find %s .', fname)
os.system('ls')
                raise Exception('FAILED')
elif isinstance(fname, list) and len(fname) == 2:
fname[0] = os.path.basename(fname[0])
return fname
else:
            raise Exception('either str(fname) or list [fname, data]')
def check_exe(self):
""" check executable permissions, raises exception if not OK """
if (isinstance(self.q_dyn5_exe, str) and
os.path.isfile(self.q_dyn5_exe)):
if os.access(self.q_dyn5_exe, os.X_OK):
pass
else:
                raise Exception('executable is not executable!')
else:
            raise Exception('executable is not a file.')
def set_exe(self, exe):
""" set self.q_dyn5_exe to exe """
self.q_dyn5_exe = exe
def set_temp(self, temp):
""" cd into temp """
if not os.path.isdir(temp):
            raise Exception('you provided a path which is not a directory!')
else:
os.chdir(temp)
self.path = temp
def __init__(self, topology, path, q_executable=None, inputfiles=None,
description=None, pdbfile=None, restartfile=None,
restraintfile=None, fepfile=None, map_settings=None):
"""
        Initialisation of an MD simulation.
Topology:
string with a filename OR list [ topology-name, data ]
Path:
a string with the path where this unit is working in.
Executable:
string with a filename
Inputfiles:
list with inputfiles.
Description:
str with description
PDBFile:
string with a filename OR list [ pdb-name, data ]
restartfile:
string with a filename OR list [ restartname, data ]
restraintfile:
string with a filename OR list [ restraintname, data ]
"""
self.set_temp(path)
if q_executable is not None:
self.set_exe(q_executable)
self.topology = self.parse_file(topology)
self.fepfile = self.parse_file(fepfile)
self.pdbfile = self.parse_file(pdbfile)
self.restartfile = self.parse_file(restartfile)
self.restraintfile = self.parse_file(restraintfile)
self.description = self.parse_file(description)
self.wus = []
self.cwu = None
self.if_pos = 0
self.inputfiles = []
self.filenames = {}
if inputfiles is not None and isinstance(inputfiles, list):
for inp in inputfiles:
if inp is None:
                    raise Exception('WTF')
self.add_inputfile(inp)
else:
self.inputfiles = []
if map_settings is None:
self.mapped = None
else:
self.mapped = False
self.map_settings = map_settings
logger.info('Next qdyn simulation step initialized.')
def stats(self, obj):
if obj is None:
return ''
elif isinstance(obj, list) and len(obj) == 2:
if obj[1] == '':
return str(obj[0]) + ': is empty'
else:
return str(obj[0]) + ':' + str(len(obj[1])) + 'chars'
elif isinstance(obj, str):
return str(obj)+": is empty"
else:
logger.warning('weirdo %s', (len(obj)))
raise Exception('weirdo')
def __repr__(self):
out = ''
out += 'PackageFiles' + NLC
for obj in [self.topology, self.pdbfile, self.fepfile,
self.restartfile, self.restraintfile, self.description]:
if self.stats(obj) != '':
out += ' ' + self.stats(obj) + NLC
out += NLC
# out += '# inputiles:' + str(self.inputfiles)
out += 'Work-Units:' + NLC
for each in self.wus:
out += ' WU: ' + str(each) + NLC
out += 'Input files:' + NLC
for i in range(len(self.wus), len(self.inputfiles)):
out += ' IF: ' + str(self.inputfiles[i]) + NLC
out += '@ position:' + str(self.if_pos) + NLC
out += ' finished:' + str(self.is_finished()) + NLC
return out
def get_file_with_name(self, fname):
with tools.cd(self.path):
if os.path.exists(fname):
return File(fname, read=True)
raise IOError('File not found', fname, 'in', self.path)
def _check_eq_and_map(self):
if '_eq' in self.inputfiles[self.if_pos][0]:
logger.debug('is eq-file %s', (self.inputfiles[self.if_pos][0]))
self.map_and_analyze(self.inputfiles[self.if_pos][0])
elif '_fep' in self.inputfiles[self.if_pos][0]:
pass
elif '_dyn' in self.inputfiles[self.if_pos][0]:
pass
else:
            logger.warning('Neither temperization, nor equilibration nor fep: %s',
(self.inputfiles[self.if_pos][0]))
def compute(self):
"""
Run Q:
Forward trough inputfile-list to inputfile without logfile and run it.
"""
with tools.cd(self.path):
            # Check if we're already done
if self.is_finished():
try:
logger.warning('Nothing to compute. %s %s', self.if_pos,
self.inputfiles[self.if_pos])
except IndexError:
logger.warning('Nothing to compute. %s', self.if_pos)
# TODO: 1) automatic mapping
else:
# TODO: add restart-capability
if not self.is_finished():
if len(self.wus) > self.if_pos:
old_input = self.wus[self.if_pos].inputfile[0]
new_input = self.inputfiles[self.if_pos]
if old_input == new_input:
if self.wus[self.if_pos].checklogfile() == 0:
                                # this WorkUnit is finished; we load the next one
logger.warning(
'this run is already done skipping %s',
self.wus[self.if_pos].inputfile[0]
)
self.if_pos += 1
self.compute()
return
# Generate new compute units, until one w/o logfile exists
while True:
if self.is_finished():
return
self.cwu = self.create_next_workunit()
if (self.cwu.status is not None and
self.cwu.status == 0):
logger.debug(
'skip step %s',
self.inputfiles[self.if_pos][0])
self._check_eq_and_map()
self.wus.append(self.cwu)
self.if_pos += 1
continue
break
exe = self.q_dyn5_exe
self.check_exe()
if len(self.wus) != self.cwu.unitnumber:
                    raise Exception('discrepancy in input file order')
if self.cwu.run(exe) == 0:
self.wus.append(self.cwu)
self._check_eq_and_map()
else:
err = 'There was a problem with step: '
err += str(self.if_pos)
err += ', in inputfile'
err += str(self.inputfiles[self.if_pos][0])
err += NLC + 'The status Code was:'
err += str(self.cwu.status)
err += NLC + NLC + 'The Error Messages where: '
err += NLC + NLC + 'Directory' + os.getcwd()
err += str(self.cwu.errMsg) + NLC
err += 'Will Raise Exception...'
logger.warning(err)
                    raise Exception('computation failed')
# increment for next step
self.if_pos += 1
def _parse_inputfile(self, inputfile):
''' Parse input file '''
files_section = False
self.filenames = {}
for line in open(inputfile, 'r'):
if line.strip() == '':
continue
# kill comments:
            line = line.replace('#', '!').split('!')[0].strip()
if files_section:
if '[' in line.strip()[0]:
files_section = False
else:
ftype, fname = line.split()
ftype = ftype.lower()
if ftype == 'topology':
self.filenames[ftype] = fname
elif ftype == 'fepfile':
self.filenames[ftype] = fname
elif ftype == 'restart':
self.filenames[ftype] = fname
elif ftype == 'restraint':
self.filenames[ftype] = fname
elif ftype == 'final':
self.filenames[ftype] = fname
elif ftype == 'trajectory':
self.filenames[ftype] = fname
elif ftype == 'energy':
self.filenames[ftype] = fname
else:
raise (Exception, "Parse Input File: Unknown Filetype.")
if line.lower() == '[files]':
files_section = True
def add_inputfile(self, inputfile):
''' Add an inputfile '''
inputfile = self.parse_file(inputfile)
if inputfile is None:
            raise Exception('WTF')
self.inputfiles.append(inputfile)
def create_next_workunit(self):
''' Load & Prepare the next input file '''
if self.if_pos == 0:
# initial one, may need special input
cwu = WorkUnit(self.if_pos, self.inputfiles[self.if_pos],
self.topology, self.pdbfile, self.fepfile,
self.restraintfile, self.restartfile)
else:
            # try to locate the restart and restraint files that might be needed
restart = None
restraint = None
self.parse_file(self.inputfiles[self.if_pos])
if 'restart' in self.filenames:
for i in range(self.if_pos):
old_restart = self.wus[self.if_pos-i].velocityfile[0]
new_restart = self.filenames['restart']
if old_restart == new_restart:
restart = self.wus[self.if_pos-i].velocityfile
if i != 1:
logger.warning('UNUSUAL RESTART')
logger.warning(
'input: %s',
self.inputfiles[self.if_pos])
logger.warning(
'is not using restart from: %s',
self.inputfiles[self.if_pos-1])
logger.warning(
'BUT INSTEAD the one from: %s',
self.inputfiles[self.if_pos-i])
if 'restraint' in self.filenames:
for i in range(self.if_pos):
old_restraint = self.wus[self.if_pos-i].velocityfile[0] + "st.re" # NOPEP8
new_restraint = self.filenames['restraint']
if old_restraint == new_restraint:
restraint = self.wus[self.if_pos-i].velocityfile[:]
restraint[0] += "st.re"
# TODO: multiple fep files could be taken from here as well
cwu = WorkUnit(self.if_pos, self.inputfiles[self.if_pos],
self.topology, None, self.fepfile, restraint,
restart)
return cwu
def is_finished(self):
""" Return True is simulation is finished, or False. """
if len(self.inputfiles) <= self.if_pos:
self.map_and_analyze()
return True
return False
def progress(self):
""" logger.info progress of md.QdynPackage """
logger.info("%s / %s", self.if_pos, len(self.inputfiles))
class MolDynSim(object):
""" Container for MolecularDynamicSimulation """
def __init__(self, path, qexecutable, topology,
inputfiles, fepfile, restartfile,
restraintfile, description, pdbfile,
map_settings=None):
self.pack = QdynPackage(topology, path, qexecutable, inputfiles,
description, pdbfile, restartfile,
restraintfile, fepfile, map_settings)
def set_executable(self, exe):
""" set executable """
self.pack.set_exe(exe)
def set_tempdir(self, temp):
""" set temporary directory """
self.pack.set_temp(temp)
def is_finished(self):
""" return bool """
return self.pack.is_finished()
def progress(self):
""" write progress to stdout"""
self.pack.progress()
def __repr__(self):
return self.pack.__repr__()
def compute(self):
""" run one step, return self """
self.pack.compute()
return self.pack
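# A minimal usage sketch for MolDynSim (all file names here are assumptions for
# illustration; the real inputs are prepared elsewhere in CADEE):
#
#     sim = MolDynSim('/tmp/run0', '/usr/local/bin/qdyn5', 'mutant.top',
#                     ['eq1.inp', 'eq2.inp', 'fep1.inp'], 'mutant.fep',
#                     None, None, 'example run', 'mutant.pdb')
#     while not sim.is_finished():
#         sim.progress()
#         sim.compute()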
| 36.86088 | 100 | 0.486548 |
584debbf07f91d5cf098b17fa41a8b798a6a5dcf | 23 | py | Python | samples/LuceneInAction/lia/advsearching/__init__.py | romanchyla/pylucene-trunk | 990079ff0c76b972ce5ef2bac9b85334a0a1f27a | [
"Apache-2.0"
] | 15 | 2015-05-21T09:28:01.000Z | 2022-03-18T23:41:49.000Z | samples/LuceneInAction/lia/advsearching/__init__.py | fnp/pylucene | fb16ac375de5479dec3919a5559cda02c899e387 | [
"Apache-2.0"
] | 1 | 2021-09-30T03:59:43.000Z | 2021-09-30T03:59:43.000Z | samples/LuceneInAction/lia/advsearching/__init__.py | romanchyla/pylucene-trunk | 990079ff0c76b972ce5ef2bac9b85334a0a1f27a | [
"Apache-2.0"
] | 13 | 2015-04-18T23:05:11.000Z | 2021-11-29T21:23:26.000Z | # advsearching package
| 11.5 | 22 | 0.826087 |
35734cd88bac755fd556604e1ee6e56afd0cee4b | 174 | py | Python | lispyc/__init__.py | csun-comp430-s22/lispy | 03820211bd3c5a83e0de4b1ac26a864080e06aca | [
"MIT"
] | null | null | null | lispyc/__init__.py | csun-comp430-s22/lispy | 03820211bd3c5a83e0de4b1ac26a864080e06aca | [
"MIT"
] | 3 | 2022-02-23T08:12:52.000Z | 2022-03-09T00:13:33.000Z | lispyc/__init__.py | csun-comp430-s22/lispy | 03820211bd3c5a83e0de4b1ac26a864080e06aca | [
"MIT"
] | null | null | null | from importlib import metadata
try:
__version__ = metadata.version("lispyc")
except metadata.PackageNotFoundError: # pragma: no cover
__version__ = "0.0.0+unknown"
| 24.857143 | 57 | 0.747126 |
3bf39d8898d28e2ea772acea9a68274b901e712b | 67,915 | py | Python | casepro/cases/tests.py | rapidpro/casepro | b61762c30f0fce64b71226271af6923868f79f7b | [
"BSD-3-Clause"
] | 21 | 2015-07-21T15:57:49.000Z | 2021-11-04T18:26:35.000Z | casepro/cases/tests.py | rapidpro/casepro | b61762c30f0fce64b71226271af6923868f79f7b | [
"BSD-3-Clause"
] | 357 | 2015-05-22T07:26:45.000Z | 2022-03-12T01:08:28.000Z | casepro/cases/tests.py | rapidpro/casepro | b61762c30f0fce64b71226271af6923868f79f7b | [
"BSD-3-Clause"
] | 24 | 2015-05-28T12:30:25.000Z | 2021-11-19T01:57:38.000Z | from datetime import datetime, timedelta
from unittest.mock import patch
import pytz
from temba_client.utils import format_iso8601
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import timezone
from casepro.contacts.models import Contact
from casepro.msgs.models import Label, Message, Outgoing
from casepro.msgs.tasks import handle_messages
from casepro.orgs_ext.models import Flow
from casepro.profiles.models import ROLE_ANALYST, ROLE_MANAGER, Notification
from casepro.test import BaseCasesTest
from casepro.utils import datetime_to_microseconds, microseconds_to_datetime
from .context_processors import sentry_dsn
from .models import AccessLevel, Case, CaseAction, CaseExport, CaseFolder, Partner
class CaseTest(BaseCasesTest):
def setUp(self):
super(CaseTest, self).setUp()
self.ann = self.create_contact(
self.unicef, "C-001", "Ann", fields={"age": "34"}, groups=[self.females, self.reporters, self.registered]
)
@patch("casepro.test.TestBackend.archive_contact_messages")
@patch("casepro.test.TestBackend.archive_messages")
@patch("casepro.test.TestBackend.stop_runs")
@patch("casepro.test.TestBackend.add_to_group")
@patch("casepro.test.TestBackend.remove_from_group")
@patch("casepro.test.TestBackend.fetch_flows")
@patch("casepro.test.TestBackend.start_flow")
def test_lifecycle(
self,
mock_start_flow,
mock_fetch_flows,
mock_remove_from_group,
mock_add_to_group,
mock_stop_runs,
mock_archive_messages,
mock_archive_contact_messages,
):
mock_fetch_flows.return_value = [Flow("0001-0001", "Registration"), Flow("0002-0002", "Follow-Up")]
followup = Flow("0002-0002", "Follow-Up")
self.unicef.set_followup_flow(followup)
d0 = datetime(2015, 1, 2, 6, 0, tzinfo=pytz.UTC)
d1 = datetime(2015, 1, 2, 7, 0, tzinfo=pytz.UTC)
d2 = datetime(2015, 1, 2, 8, 0, tzinfo=pytz.UTC)
d3 = datetime(2015, 1, 2, 9, 0, tzinfo=pytz.UTC)
d4 = datetime(2015, 1, 2, 10, 0, tzinfo=pytz.UTC)
d5 = datetime(2015, 1, 2, 11, 0, tzinfo=pytz.UTC)
d6 = datetime(2015, 1, 2, 12, 0, tzinfo=pytz.UTC)
d7 = datetime(2015, 1, 2, 13, 0, tzinfo=pytz.UTC)
self.create_message(self.unicef, 123, self.ann, "Hello", created_on=d0)
msg2 = self.create_message(self.unicef, 234, self.ann, "Hello again", [self.aids], created_on=d1)
with patch.object(timezone, "now", return_value=d1):
# MOH opens new case
case = Case.get_or_open(self.unicef, self.user1, msg2, "Summary", self.moh)
self.assertTrue(case.is_new)
self.assertEqual(case.org, self.unicef)
self.assertEqual(set(case.labels.all()), {self.aids})
self.assertEqual(set(case.watchers.all()), {self.user1})
self.assertEqual(case.assignee, self.moh)
self.assertEqual(case.contact, self.ann)
self.assertEqual(case.initial_message, msg2)
self.assertEqual(case.summary, "Summary")
self.assertEqual(case.opened_on, d1)
self.assertIsNone(case.closed_on)
actions = case.actions.order_by("pk")
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].action, CaseAction.OPEN)
self.assertEqual(actions[0].created_by, self.user1)
self.assertEqual(actions[0].created_on, d1)
self.assertEqual(actions[0].assignee, self.moh)
# message is now attached to the case
msg2.refresh_from_db()
self.assertEqual(msg2.case, case)
# check that opening the case archived the contact's messages
mock_archive_contact_messages.assert_called_once_with(self.unicef, self.ann)
mock_archive_contact_messages.reset_mock()
self.assertEqual(Message.objects.filter(contact=self.ann, is_archived=False).count(), 0)
# check that opening the case removed contact from specified suspend groups
mock_remove_from_group.assert_called_once_with(self.unicef, self.ann, self.reporters)
mock_remove_from_group.reset_mock()
# check that contacts groups were suspended
self.assertEqual(set(Contact.objects.get(pk=self.ann.pk).groups.all()), {self.females, self.registered})
self.assertEqual(set(Contact.objects.get(pk=self.ann.pk).suspended_groups.all()), {self.reporters})
# check that contact's runs were expired
mock_stop_runs.assert_called_once_with(self.unicef, self.ann)
mock_stop_runs.reset_mock()
# check that calling get_or_open again returns the same case (finds same case on message)
case2 = Case.get_or_open(self.unicef, self.user1, msg2, "Summary", self.moh)
self.assertFalse(case2.is_new)
self.assertEqual(case, case2)
# user #2 should be notified of this new case assignment
self.assertEqual(Notification.objects.count(), 1)
Notification.objects.get(user=self.user2, type=Notification.TYPE_CASE_ASSIGNMENT, case_action=actions[0])
# contact sends a reply
msg3 = self.create_message(self.unicef, 432, self.ann, "OK", created_on=d2)
handle_messages(self.unicef.pk)
# user #1 should be notified of this reply
self.assertEqual(Notification.objects.count(), 2)
Notification.objects.get(user=self.user1, type=Notification.TYPE_CASE_REPLY, message=msg3)
# which will have been archived and added to the case
mock_archive_messages.assert_called_once_with(self.unicef, [msg3])
mock_archive_messages.reset_mock()
msg3.refresh_from_db()
self.assertTrue(msg3.is_archived)
self.assertEqual(msg3.case, case)
with patch.object(timezone, "now", return_value=d2):
# other user in MOH adds a note
case.add_note(self.user2, "Interesting")
actions = case.actions.order_by("pk")
self.assertEqual(len(actions), 2)
self.assertEqual(actions[1].action, CaseAction.ADD_NOTE)
self.assertEqual(actions[1].created_by, self.user2)
self.assertEqual(actions[1].created_on, d2)
self.assertEqual(actions[1].note, "Interesting")
self.assertEqual(set(case.watchers.all()), {self.user1, self.user2})
# user #1 should be notified of this new note
self.assertEqual(Notification.objects.count(), 3)
Notification.objects.get(user=self.user1, type=Notification.TYPE_CASE_ACTION, case_action=actions[1])
# user from other partner org can't re-assign or close case
self.assertRaises(PermissionDenied, case.reassign, self.user3)
self.assertRaises(PermissionDenied, case.close, self.user3)
with patch.object(timezone, "now", return_value=d3):
# first user closes the case
case.close(self.user1)
self.assertEqual(case.opened_on, d1)
self.assertEqual(case.closed_on, d3)
actions = case.actions.order_by("pk")
self.assertEqual(len(actions), 3)
self.assertEqual(actions[2].action, CaseAction.CLOSE)
self.assertEqual(actions[2].created_by, self.user1)
self.assertEqual(actions[2].created_on, d3)
# user #2 should be notified
self.assertEqual(Notification.objects.count(), 4)
Notification.objects.get(user=self.user2, type=Notification.TYPE_CASE_ACTION, case_action=actions[2])
# check that contacts groups were restored
self.assertEqual(
set(Contact.objects.get(pk=self.ann.pk).groups.all()), {self.females, self.reporters, self.registered}
)
self.assertEqual(set(Contact.objects.get(pk=self.ann.pk).suspended_groups.all()), set())
mock_add_to_group.assert_called_once_with(self.unicef, self.ann, self.reporters)
mock_add_to_group.reset_mock()
# check our follow-up flow was started
mock_start_flow.assert_called_once_with(
self.unicef,
followup,
self.ann,
extra={
"case": {
"id": case.id,
"assignee": {"id": self.moh.id, "name": "MOH"},
"opened_on": "2015-01-02T07:00:00+00:00",
}
},
)
mock_start_flow.reset_mock()
# contact sends a message after case was closed
msg4 = self.create_message(self.unicef, 345, self.ann, "No more case", created_on=d4)
handle_messages(self.unicef.pk)
# message is not in an open case, so won't have been archived
mock_archive_messages.assert_not_called()
msg4.refresh_from_db()
self.assertFalse(msg4.is_archived)
with patch.object(timezone, "now", return_value=d4):
# but second user re-opens it
case.reopen(self.user2)
self.assertEqual(case.opened_on, d1) # unchanged
self.assertIsNone(case.closed_on)
actions = case.actions.order_by("pk")
self.assertEqual(len(actions), 4)
self.assertEqual(actions[3].action, CaseAction.REOPEN)
self.assertEqual(actions[3].created_by, self.user2)
self.assertEqual(actions[3].created_on, d4)
# user #1 should be notified
self.assertEqual(Notification.objects.count(), 5)
Notification.objects.get(user=self.user1, type=Notification.TYPE_CASE_ACTION, case_action=actions[3])
# check that re-opening the case archived the contact's messages again
mock_archive_contact_messages.assert_called_once_with(self.unicef, self.ann)
msg4.refresh_from_db()
self.assertTrue(msg4.is_archived)
with patch.object(timezone, "now", return_value=d5):
# and re-assigns it to different partner
case.reassign(self.user2, self.who)
self.assertEqual(case.assignee, self.who)
actions = case.actions.order_by("pk")
self.assertEqual(len(actions), 5)
self.assertEqual(actions[4].action, CaseAction.REASSIGN)
self.assertEqual(actions[4].created_by, self.user2)
self.assertEqual(actions[4].created_on, d5)
self.assertEqual(actions[4].assignee, self.who)
# users #1 (a watcher) and #3 (a new assignee) should be notified of this re-assignment
self.assertEqual(Notification.objects.count(), 7)
Notification.objects.get(user=self.user1, type=Notification.TYPE_CASE_ACTION, case_action=actions[4])
Notification.objects.get(user=self.user3, type=Notification.TYPE_CASE_ASSIGNMENT, case_action=actions[4])
with patch.object(timezone, "now", return_value=d6):
# user from that partner re-labels it
case.update_labels(self.user3, [self.pregnancy])
actions = case.actions.order_by("pk")
self.assertEqual(len(actions), 7)
self.assertEqual(actions[5].action, CaseAction.LABEL)
self.assertEqual(actions[5].created_by, self.user3)
self.assertEqual(actions[5].created_on, d6)
self.assertEqual(actions[5].label, self.pregnancy)
self.assertEqual(actions[6].action, CaseAction.UNLABEL)
self.assertEqual(actions[6].created_by, self.user3)
self.assertEqual(actions[6].created_on, d6)
self.assertEqual(actions[6].label, self.aids)
with patch.object(timezone, "now", return_value=d7):
# user from that partner org closes it again
case.close(self.user3)
self.assertEqual(case.opened_on, d1)
self.assertEqual(case.closed_on, d7)
actions = case.actions.order_by("pk")
self.assertEqual(len(actions), 8)
self.assertEqual(actions[7].action, CaseAction.CLOSE)
self.assertEqual(actions[7].created_by, self.user3)
self.assertEqual(actions[7].created_on, d7)
# check our follow-up flow wasn't started since this isn't the first time this case has been closed
mock_start_flow.assert_not_called()
# check that calling get_or_open again returns the same case (finds case for same message)
case3 = Case.get_or_open(self.unicef, self.user1, msg2, "Summary", self.moh)
self.assertFalse(case3.is_new)
self.assertEqual(case, case3)
@patch("casepro.test.TestBackend.add_to_group")
@patch("casepro.test.TestBackend.remove_from_group")
def test_close_case_when_contact_stopped(self, mock_remove_from_group, mock_add_to_group):
msg = self.create_message(self.unicef, 123, self.ann, "Hello 1", [self.aids])
case = Case.get_or_open(self.unicef, self.user1, msg, "Summary", self.moh)
# check that opening the case removed contact from specified suspend groups
mock_remove_from_group.assert_called_once_with(self.unicef, self.ann, self.reporters)
# stop the contact
self.ann.is_stopped = True
self.ann.save()
case.close(self.user1)
# check we don't try to put this contact back in their groups
mock_add_to_group.assert_not_called()
def test_get_all(self):
bob = self.create_contact(self.unicef, "C-002", "Bob")
cat = self.create_contact(self.unicef, "C-003", "Cat")
nic = self.create_contact(self.nyaruka, "C-104", "Nic")
msg1 = self.create_message(self.unicef, 123, self.ann, "Hello 1", [self.aids])
msg2 = self.create_message(self.unicef, 234, bob, "Hello 2", [self.aids, self.pregnancy])
msg3 = self.create_message(self.unicef, 345, cat, "Hello 3", [self.pregnancy])
msg4 = self.create_message(self.nyaruka, 456, nic, "Hello 4", [self.code])
case1 = self.create_case(self.unicef, self.ann, self.moh, msg1, [self.aids])
case2 = self.create_case(self.unicef, bob, self.who, msg2, [self.aids, self.pregnancy])
case3 = self.create_case(self.unicef, cat, self.who, msg3, [self.pregnancy])
case4 = self.create_case(self.nyaruka, nic, self.klab, msg4, [self.code])
self.assertEqual(set(Case.get_all(self.unicef)), {case1, case2, case3}) # org admins see all
self.assertEqual(set(Case.get_all(self.nyaruka)), {case4})
self.assertEqual(set(Case.get_all(self.unicef, user=self.user1)), {case1, case2, case3}) # case3 by label
self.assertEqual(set(Case.get_all(self.unicef, user=self.user2)), {case1, case2, case3})
self.assertEqual(set(Case.get_all(self.unicef, user=self.user3)), {case1, case2, case3}) # case3 by assignment
self.assertEqual(set(Case.get_all(self.nyaruka, user=self.user4)), {case4})
self.assertEqual(set(Case.get_all(self.unicef, label=self.aids)), {case1, case2})
self.assertEqual(set(Case.get_all(self.unicef, label=self.pregnancy)), {case2, case3})
self.assertEqual(set(Case.get_all(self.unicef, user=self.user1, label=self.pregnancy)), {case2, case3})
self.assertEqual(set(Case.get_all(self.unicef, user=self.user3, label=self.pregnancy)), {case2, case3})
case2.closed_on = timezone.now()
case2.save()
self.assertEqual(set(Case.get_open(self.unicef)), {case1, case3})
self.assertEqual(set(Case.get_open(self.unicef, user=self.user1, label=self.pregnancy)), {case3})
self.assertEqual(set(Case.get_closed(self.unicef)), {case2})
self.assertEqual(set(Case.get_closed(self.unicef, user=self.user1, label=self.pregnancy)), {case2})
def test_get_open_for_contact_on(self):
d0 = datetime(2014, 1, 5, 0, 0, tzinfo=pytz.UTC)
d1 = datetime(2014, 1, 10, 0, 0, tzinfo=pytz.UTC)
d2 = datetime(2014, 1, 15, 0, 0, tzinfo=pytz.UTC)
# case Jan 5th -> Jan 10th
msg1 = self.create_message(self.unicef, 123, self.ann, "Hello", created_on=d0)
case1 = self.create_case(self.unicef, self.ann, self.moh, msg1, opened_on=d0, closed_on=d1)
# case Jan 15th -> now
msg2 = self.create_message(self.unicef, 234, self.ann, "Hello again", created_on=d2)
case2 = self.create_case(self.unicef, self.ann, self.moh, msg2, opened_on=d2)
# check no cases open on Jan 4th
open_case = Case.get_open_for_contact_on(self.unicef, self.ann, datetime(2014, 1, 4, 0, 0, tzinfo=pytz.UTC))
self.assertIsNone(open_case)
# check case open on Jan 7th
open_case = Case.get_open_for_contact_on(self.unicef, self.ann, datetime(2014, 1, 7, 0, 0, tzinfo=pytz.UTC))
self.assertEqual(open_case, case1)
# check no cases open on Jan 13th
open_case = Case.get_open_for_contact_on(self.unicef, self.ann, datetime(2014, 1, 13, 0, 0, tzinfo=pytz.UTC))
self.assertIsNone(open_case)
# check case open on 20th
open_case = Case.get_open_for_contact_on(self.unicef, self.ann, datetime(2014, 1, 16, 0, 0, tzinfo=pytz.UTC))
self.assertEqual(open_case, case2)
def test_get_or_open_with_user_assignee(self):
"""
If a case is opened with the user_assignee field set, the created case should have the assigned user, and
the created case action should also have the assigned user.
"""
msg = self.create_message(
self.unicef, 123, self.ann, "Hello", created_on=datetime(2014, 1, 5, 0, 0, tzinfo=pytz.UTC)
)
case = Case.get_or_open(self.unicef, self.user2, msg, "Hello", self.moh, user_assignee=self.user1)
self.assertEqual(case.user_assignee, self.user1)
case_action = CaseAction.objects.get(case=case)
self.assertEqual(case_action.user_assignee, self.user1)
# only assigned user should be notified
self.assertEqual(Notification.objects.count(), 1)
Notification.objects.get(user=self.user1, type=Notification.TYPE_CASE_ASSIGNMENT)
def test_get_open_no_initial_message_new_case(self):
"""
        We should be able to create a case with no initial message by supplying a contact instead.
"""
case = Case.get_or_open(
self.unicef, self.user2, None, "Hello", self.moh, user_assignee=self.user1, contact=self.ann
)
self.assertEqual(case.contact, self.ann)
self.assertEqual(case.assignee, self.moh)
self.assertEqual(case.user_assignee, self.user1)
self.assertEqual(case.initial_message, None)
self.assertEqual(case.is_new, True)
self.assertEqual(list(case.watchers.all()), [self.user2])
[case_action] = list(CaseAction.objects.filter(case=case))
self.assertEqual(case_action.action, CaseAction.OPEN)
self.assertEqual(case_action.assignee, self.moh)
self.assertEqual(case_action.user_assignee, self.user1)
def test_get_open_no_initial_message_existing_case(self):
"""
        When using get_or_open with no initial message but with a contact supplied, and that contact already has an
        open case, it should return that existing case instead of creating a new one.
"""
case1 = Case.get_or_open(
self.unicef, self.user2, None, "Hello", self.moh, user_assignee=self.user1, contact=self.ann
)
case2 = Case.get_or_open(
self.unicef, self.user2, None, "Hello", self.moh, user_assignee=self.user1, contact=self.ann
)
self.assertEqual(case2.is_new, False)
self.assertEqual(case1, case2)
case1.close(self.user1)
case3 = Case.get_or_open(
self.unicef, self.user2, None, "Hello", self.moh, user_assignee=self.user1, contact=self.ann
)
self.assertEqual(case3.is_new, True)
self.assertNotEqual(case2, case3)
def test_get_open_no_message_or_contact(self):
"""
When using get_or_open with no initial message and no existing contact a ValueError should be raised.
"""
self.assertRaises(
ValueError,
Case.get_or_open,
self.unicef,
self.user2,
None,
"Hello",
self.moh,
user_assignee=self.user1,
contact=None,
)
def test_search(self):
d1 = datetime(2014, 1, 9, 0, 0, tzinfo=pytz.UTC)
d2 = datetime(2014, 1, 10, 0, 0, tzinfo=pytz.UTC)
d3 = datetime(2014, 1, 11, 0, 0, tzinfo=pytz.UTC)
d4 = datetime(2014, 1, 12, 0, 0, tzinfo=pytz.UTC)
d5 = datetime(2014, 1, 13, 0, 0, tzinfo=pytz.UTC)
bob = self.create_contact(self.unicef, "C-002", "Bob")
cat = self.create_contact(self.unicef, "C-003", "Cat")
don = self.create_contact(self.unicef, "C-004", "Don")
nic = self.create_contact(self.nyaruka, "C-005", "Nic")
msg1 = self.create_message(self.unicef, 101, self.ann, "Hello 1")
msg2 = self.create_message(self.unicef, 102, self.ann, "Hello 2")
msg3 = self.create_message(self.unicef, 103, bob, "Hello 3")
msg4 = self.create_message(self.unicef, 104, cat, "Hello 4")
msg5 = self.create_message(self.unicef, 105, cat, "Hello 5")
case1 = self.create_case(self.unicef, self.ann, self.moh, msg1, opened_on=d1, closed_on=d2)
case2 = self.create_case(self.unicef, self.ann, self.moh, msg2, opened_on=d2)
case3 = self.create_case(self.unicef, bob, self.who, msg3, opened_on=d3)
case4 = self.create_case(self.unicef, cat, self.who, msg4, opened_on=d4)
case5 = self.create_case(self.unicef, don, self.who, msg5, opened_on=d5, user_assignee=self.user3)
# other org
msg5 = self.create_message(self.nyaruka, 106, nic, "Hello")
self.create_case(self.nyaruka, nic, self.klab, msg5)
def assert_search(user, params, results):
self.assertEqual(list(Case.search(self.unicef, user, params)), results)
# by org admin (sees all cases)
assert_search(self.admin, {"folder": CaseFolder.open}, [case5, case4, case3, case2])
assert_search(self.admin, {"folder": CaseFolder.closed}, [case1])
# by partner user (sees only cases assigned to them)
assert_search(self.user1, {"folder": CaseFolder.open}, [case2])
assert_search(self.user1, {"folder": CaseFolder.closed}, [case1])
# by assignee (partner)
assert_search(self.user1, {"folder": CaseFolder.open, "assignee": self.moh.id}, [case2])
assert_search(self.user1, {"folder": CaseFolder.open, "assignee": self.who.id}, []) # user not in that partner
assert_search(self.user3, {"folder": CaseFolder.open, "assignee": self.who.id}, [case5, case4, case3])
# by assignee (user)
assert_search(self.user1, {"folder": CaseFolder.open, "user_assignee": self.user1.id}, [])
assert_search(self.user1, {"folder": CaseFolder.open, "user_assignee": self.user3.id}, [])
assert_search(self.user3, {"folder": CaseFolder.open, "user_assignee": self.user3.id}, [case5])
assert_search(self.user3, {"folder": CaseFolder.all, "user_assignee": self.user3.id}, [case5])
# by before/after
assert_search(self.admin, {"folder": CaseFolder.open, "before": d2}, [case2])
assert_search(self.admin, {"folder": CaseFolder.open, "after": d3}, [case5, case4, case3])
def test_access_level(self):
msg = self.create_message(self.unicef, 234, self.ann, "Hello")
case = self.create_case(self.unicef, self.ann, self.moh, msg, [self.aids])
self.assertEqual(case.access_level(self.superuser), AccessLevel.update) # superusers can update
self.assertEqual(case.access_level(self.admin), AccessLevel.update) # admins can update
self.assertEqual(case.access_level(self.user1), AccessLevel.update) # user from assigned partner can update
self.assertEqual(case.access_level(self.user3), AccessLevel.read) # user from other partner can read bc labels
self.assertEqual(case.access_level(self.user4), AccessLevel.none) # user from different org
class CaseCRUDLTest(BaseCasesTest):
def setUp(self):
super(CaseCRUDLTest, self).setUp()
self.ann = self.create_contact(
self.unicef, "C-001", "Ann", fields={"age": "34"}, groups=[self.females, self.reporters]
)
self.msg = self.create_message(self.unicef, 101, self.ann, "Hello", [self.aids])
self.case = self.create_case(
self.unicef, self.ann, self.moh, self.msg, [self.aids], summary="Summary", user_assignee=self.user1
)
@patch("casepro.test.TestBackend.archive_contact_messages")
@patch("casepro.test.TestBackend.stop_runs")
@patch("casepro.test.TestBackend.add_to_group")
@patch("casepro.test.TestBackend.remove_from_group")
def test_open(self, mock_remove_contacts, mock_add_contacts, mock_stop_runs, mock_archive_contact_messages):
CaseAction.objects.all().delete()
Message.objects.update(case=None)
Case.objects.all().delete()
Message.objects.all().delete()
msg1 = self.create_message(self.unicef, 101, self.ann, "Hello", [self.aids])
url = reverse("cases.case_open")
# log in as an administrator
self.login(self.admin)
response = self.url_post_json(
"unicef",
url,
{"message": 101, "summary": "Summary", "assignee": self.moh.pk, "user_assignee": self.user1.pk},
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json["summary"], "Summary")
self.assertEqual(response.json["is_new"], True)
self.assertEqual(response.json["watching"], True)
case1 = Case.objects.get(pk=response.json["id"])
self.assertEqual(case1.initial_message, msg1)
self.assertEqual(case1.summary, "Summary")
self.assertEqual(case1.assignee, self.moh)
self.assertEqual(case1.user_assignee, self.user1)
self.assertEqual(set(case1.labels.all()), {self.aids})
self.assertEqual(case1.contact, msg1.contact)
# try again as a non-administrator who can't create cases for other partner orgs
rick = self.create_contact(self.unicef, "C-002", "Richard")
msg2 = self.create_message(self.unicef, 102, rick, "Hello", [self.aids])
# log in as a non-administrator
self.login(self.user1)
response = self.url_post_json("unicef", url, {"message": 102, "summary": "Summary"})
self.assertEqual(response.status_code, 200)
case2 = Case.objects.get(pk=response.json["id"])
self.assertEqual(case2.initial_message, msg2)
self.assertEqual(case2.summary, "Summary")
self.assertEqual(case2.assignee, self.moh)
self.assertEqual(set(case2.labels.all()), {self.aids})
def test_open_user_assignee_not_member_of_partner(self):
"""
If the user specified in user_assignee is not a member of the partner specified by assignee, then a not found
error should be returned.
"""
self.login(self.admin)
msg = self.create_message(self.unicef, 102, self.ann, "Hello", [self.aids])
response = self.url_post_json(
"unicef",
reverse("cases.case_open"),
{"message": msg.backend_id, "summary": "Summary", "assignee": self.moh.pk, "user_assignee": self.user3.pk},
)
self.assertEqual(response.status_code, 404)
def test_open_no_message_id(self):
"""
If a case is opened, and no initial message is supplied, but instead a contact is supplied, the case should
open with a contact and no initial message instead of getting the contact from the initial message.
"""
contact = self.create_contact(self.unicef, "C-002", "TestContact")
contact.urns = ["tel:+27741234567"]
contact.save()
url = reverse("cases.case_open")
self.login(self.admin)
response = self.url_post_json(
"unicef",
url,
{
"message": None,
"summary": "Summary",
"assignee": self.moh.pk,
"user_assignee": self.user1.pk,
"urn": contact.urns[0],
},
)
self.assertEqual(response.status_code, 200)
case = Case.objects.get(pk=response.json["id"])
self.assertEqual(case.initial_message, None)
self.assertEqual(case.contact, contact)
def test_open_no_message_id_new_contact(self):
"""
        If a case is opened, and no initial message is supplied, but a URN is supplied instead, and the URN doesn't
        match any existing contacts, then a new contact should be created, and the case assigned to that contact.
"""
url = reverse("cases.case_open")
self.login(self.admin)
response = self.url_post_json(
"unicef",
url,
{
"message": None,
"summary": "Summary",
"assignee": self.moh.pk,
"user_assignee": self.user1.pk,
"urn": "tel:+27741234567",
},
)
self.assertEqual(response.status_code, 200)
case = Case.objects.get(pk=response.json["id"])
self.assertEqual(case.initial_message, None)
self.assertEqual(case.contact.urns, ["tel:+27741234567"])
def test_read(self):
url = reverse("cases.case_read", args=[self.case.pk])
# log in as non-administrator
self.login(self.user1)
response = self.url_get("unicef", url)
self.assertEqual(response.status_code, 200)
def test_note(self):
url = reverse("cases.case_note", args=[self.case.pk])
# log in as manager user in assigned partner
self.login(self.user1)
response = self.url_post_json("unicef", url, {"note": "This is a note"})
self.assertEqual(response.status_code, 204)
action = CaseAction.objects.get()
self.assertEqual(action.org, self.case.org)
self.assertEqual(action.case, self.case)
self.assertEqual(action.action, CaseAction.ADD_NOTE)
self.assertEqual(action.note, "This is a note")
self.assertEqual(action.created_by, self.user1)
# users from other partners with label access are allowed to add notes
self.login(self.user3)
response = self.url_post_json("unicef", url, {"note": "This is another note"})
self.assertEqual(response.status_code, 204)
# but not if they lose label-based access
self.case.update_labels(self.admin, [self.pregnancy])
response = self.url_post_json("unicef", url, {"note": "Yet another"})
self.assertEqual(response.status_code, 403)
# and users from other orgs certainly aren't allowed to
self.login(self.user4)
response = self.url_post_json("unicef", url, {"note": "Hey guys"})
self.assertEqual(response.status_code, 302)
def test_reassign(self):
url = reverse("cases.case_reassign", args=[self.case.pk])
# log in as manager user in currently assigned partner
self.login(self.user1)
response = self.url_post_json("unicef", url, {"assignee": self.who.pk, "user_assignee": self.user3.pk})
self.assertEqual(response.status_code, 204)
action = CaseAction.objects.get()
self.assertEqual(action.case, self.case)
self.assertEqual(action.action, CaseAction.REASSIGN)
self.assertEqual(action.created_by, self.user1)
self.assertEqual(action.user_assignee, self.user3)
self.case.refresh_from_db()
self.assertEqual(self.case.assignee, self.who)
self.assertEqual(self.case.user_assignee, self.user3)
# only user from assigned partner can re-assign
response = self.url_post_json("unicef", url, {"assignee": self.moh.pk})
self.assertEqual(response.status_code, 403)
# can only be assigned to user from assigned partner
response = self.url_post_json("unicef", url, {"assignee": self.who.pk, "user_assignee": self.user2.pk})
self.assertEqual(response.status_code, 404)
# only the assigned user should get a notification
self.assertEqual(Notification.objects.count(), 1)
self.assertEqual(Notification.objects.get().user, self.user3)
def test_reassign_no_user(self):
"""The user field should be optional, and reassignment should still work without it."""
url = reverse("cases.case_reassign", args=[self.case.pk])
# log in as manager user in currently assigned partner
self.login(self.user1)
response = self.url_post_json("unicef", url, {"assignee": self.who.pk, "user_assignee": None})
self.assertEqual(response.status_code, 204)
# notifies users in that partner org
self.assertEqual(Notification.objects.count(), 1)
self.assertEqual(Notification.objects.get().user, self.user3)
def test_close(self):
url = reverse("cases.case_close", args=[self.case.pk])
# log in as manager user in currently assigned partner
self.login(self.user1)
response = self.url_post_json("unicef", url, {"note": "It's over"})
self.assertEqual(response.status_code, 204)
action = CaseAction.objects.get()
self.assertEqual(action.case, self.case)
self.assertEqual(action.action, CaseAction.CLOSE)
self.assertEqual(action.created_by, self.user1)
self.case.refresh_from_db()
self.assertIsNotNone(self.case.closed_on)
# only user from assigned partner can close
self.login(self.user3)
self.case.reopen(self.admin, "Because")
response = self.url_post_json("unicef", url, {"note": "It's over"})
self.assertEqual(response.status_code, 403)
def test_reopen(self):
self.case.close(self.admin, "Done")
url = reverse("cases.case_reopen", args=[self.case.pk])
# log in as manager user in currently assigned partner
self.login(self.user1)
response = self.url_post_json("unicef", url, {"note": "Unfinished business"})
self.assertEqual(response.status_code, 204)
action = CaseAction.objects.get(created_by=self.user1)
self.assertEqual(action.case, self.case)
self.assertEqual(action.action, CaseAction.REOPEN)
self.case.refresh_from_db()
self.assertIsNone(self.case.closed_on)
# only user from assigned partner can reopen
self.login(self.user3)
self.case.close(self.admin, "Done")
response = self.url_post_json("unicef", url, {"note": "Unfinished business"})
self.assertEqual(response.status_code, 403)
def test_label(self):
url = reverse("cases.case_label", args=[self.case.pk])
# log in as manager user in currently assigned partner
self.login(self.user1)
# add additional label to case which this user can't access
self.case.labels.add(self.tea)
response = self.url_post_json("unicef", url, {"labels": [self.pregnancy.pk]})
self.assertEqual(response.status_code, 204)
actions = CaseAction.objects.filter(case=self.case).order_by("pk")
self.assertEqual(len(actions), 2)
self.assertEqual(actions[0].action, CaseAction.LABEL)
self.assertEqual(actions[0].label, self.pregnancy)
self.assertEqual(actions[1].action, CaseAction.UNLABEL)
self.assertEqual(actions[1].label, self.aids)
# check that tea label wasn't removed as this user doesn't have access to that label
self.case.refresh_from_db()
self.assertEqual(set(self.case.labels.all()), {self.pregnancy, self.tea})
# only user from assigned partner can label
self.login(self.user3)
response = self.url_post_json("unicef", url, {"labels": [self.aids.pk]})
self.assertEqual(response.status_code, 403)
def test_update_summary(self):
url = reverse("cases.case_update_summary", args=[self.case.pk])
# log in as manager user in currently assigned partner
self.login(self.user1)
response = self.url_post_json("unicef", url, {"summary": "New summary"})
self.assertEqual(response.status_code, 204)
action = CaseAction.objects.get(case=self.case)
self.assertEqual(action.action, CaseAction.UPDATE_SUMMARY)
self.case.refresh_from_db()
self.assertEqual(self.case.summary, "New summary")
# only user from assigned partner can change the summary
self.login(self.user3)
response = self.url_post_json("unicef", url, {"summary": "Something else"})
self.assertEqual(response.status_code, 403)
def test_reply(self):
url = reverse("cases.case_reply", args=[self.case.pk])
# log in as manager user in currently assigned partner
self.login(self.user1)
response = self.url_post_json("unicef", url, {"text": "We can help"})
self.assertEqual(response.status_code, 200)
outgoing = Outgoing.objects.get()
self.assertEqual(outgoing.activity, Outgoing.CASE_REPLY)
self.assertEqual(outgoing.text, "We can help")
self.assertEqual(outgoing.created_by, self.user1)
# only user from assigned partner can reply
self.login(self.user3)
response = self.url_post_json("unicef", url, {"text": "Hi"})
self.assertEqual(response.status_code, 403)
def test_fetch(self):
url = reverse("cases.case_fetch", args=[self.case.pk])
# log in as manager user in currently assigned partner
self.login(self.user1)
response = self.url_get("unicef", url)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.json,
{
"id": self.case.pk,
"contact": {"id": self.ann.pk, "display": "Ann"},
"assignee": {"id": self.moh.pk, "name": "MOH"},
"labels": [{"id": self.aids.pk, "name": "AIDS"}],
"summary": "Summary",
"opened_on": format_iso8601(self.case.opened_on),
"is_closed": False,
"watching": False,
"user_assignee": {"id": self.user1.pk, "name": "Evan"},
},
)
# users with label access can also fetch
self.login(self.user3)
response = self.url_get("unicef", url)
self.assertEqual(response.status_code, 200)
@patch("casepro.test.TestBackend.fetch_contact_messages")
def test_timeline(self, mock_fetch_contact_messages):
CaseAction.objects.all().delete()
Message.objects.update(case=None)
Case.objects.all().delete()
Message.objects.all().delete()
d0 = datetime(2014, 1, 2, 12, 0, tzinfo=pytz.UTC)
d1 = datetime(2014, 1, 2, 13, 0, tzinfo=pytz.UTC)
d2 = datetime(2014, 1, 2, 14, 0, tzinfo=pytz.UTC)
# local message before case time window
self.create_message(self.unicef, 100, self.ann, "Unrelated", [], created_on=d0)
# create and open case
msg1 = self.create_message(self.unicef, 101, self.ann, "What is AIDS?", [self.aids], created_on=d1)
case = self.create_case(self.unicef, self.ann, self.moh, msg1, user_assignee=self.user1)
CaseAction.create(case, self.user1, CaseAction.OPEN, assignee=self.moh, user_assignee=self.user1)
# backend has a message in the case time window that we don't have locally
remote_message1 = Outgoing(
backend_broadcast_id=102, contact=self.ann, text="Non casepro message...", created_on=d2
)
mock_fetch_contact_messages.return_value = [remote_message1]
timeline_url = reverse("cases.case_timeline", args=[case.pk])
# log in as non-administrator
self.login(self.user1)
# request all of a timeline up to now
response = self.url_get("unicef", "%s?after=" % timeline_url)
t0 = microseconds_to_datetime(response.json["max_time"])
self.assertEqual(len(response.json["results"]), 3)
self.assertEqual(response.json["results"][0]["type"], "I")
self.assertEqual(response.json["results"][0]["item"]["text"], "What is AIDS?")
self.assertEqual(response.json["results"][0]["item"]["contact"], {"id": self.ann.pk, "display": "Ann"})
self.assertEqual(
response.json["results"][0]["item"]["case"]["user_assignee"], {"id": self.user1.pk, "name": "Evan"}
)
self.assertEqual(response.json["results"][1]["type"], "O")
self.assertEqual(response.json["results"][1]["item"]["text"], "Non casepro message...")
self.assertEqual(response.json["results"][1]["item"]["contact"], {"id": self.ann.pk, "display": "Ann"})
self.assertEqual(response.json["results"][2]["type"], "A")
self.assertEqual(response.json["results"][2]["item"]["action"], "O")
# as this was the initial request, messages will have been fetched from the backend
mock_fetch_contact_messages.assert_called_once_with(self.unicef, self.ann, d1, t0)
mock_fetch_contact_messages.reset_mock()
mock_fetch_contact_messages.return_value = []
# page looks for new timeline activity
response = self.url_get("unicef", "%s?after=%s" % (timeline_url, datetime_to_microseconds(t0)))
t1 = microseconds_to_datetime(response.json["max_time"])
self.assertEqual(len(response.json["results"]), 0)
# messages won't have been fetched from the backend this time
self.assertNotCalled(mock_fetch_contact_messages)
# another user adds a note
case.add_note(self.user2, "Looks interesting")
# page again looks for new timeline activity
response = self.url_get("unicef", "%s?after=%s" % (timeline_url, datetime_to_microseconds(t1)))
t2 = microseconds_to_datetime(response.json["max_time"])
self.assertNotCalled(mock_fetch_contact_messages)
self.assertEqual(len(response.json["results"]), 1)
self.assertEqual(response.json["results"][0]["type"], "A")
self.assertEqual(response.json["results"][0]["item"]["note"], "Looks interesting")
# user sends an outgoing message
d3 = timezone.now()
outgoing = Outgoing.create_case_reply(self.unicef, self.user1, "It's bad", case)
outgoing.backend_broadcast_id = 202
outgoing.save()
# page again looks for new timeline activity
response = self.url_get("unicef", "%s?after=%s" % (timeline_url, datetime_to_microseconds(t2)))
t3 = microseconds_to_datetime(response.json["max_time"])
self.assertEqual(len(response.json["results"]), 1)
self.assertEqual(response.json["results"][0]["type"], "O")
self.assertEqual(response.json["results"][0]["item"]["text"], "It's bad")
# contact sends a reply
d4 = timezone.now()
self.create_message(self.unicef, 104, self.ann, "OK thanks", created_on=d4)
handle_messages(self.unicef.pk)
# page again looks for new timeline activity
response = self.url_get("unicef", "%s?after=%s" % (timeline_url, datetime_to_microseconds(t3)))
t4 = microseconds_to_datetime(response.json["max_time"])
self.assertEqual(len(response.json["results"]), 1)
self.assertEqual(response.json["results"][0]["type"], "I")
self.assertEqual(response.json["results"][0]["item"]["text"], "OK thanks")
# page again looks for new timeline activity
response = self.url_get("unicef", "%s?after=%s" % (timeline_url, datetime_to_microseconds(t4)))
t5 = microseconds_to_datetime(response.json["max_time"])
self.assertEqual(len(response.json["results"]), 0)
# user closes case
case.close(self.user1)
# contact sends new message after that
d5 = timezone.now()
self.create_message(self.unicef, 105, self.ann, "But wait", created_on=d5)
handle_messages(self.unicef.pk)
# page again looks for new timeline activity
response = self.url_get("unicef", "%s?after=%s" % (timeline_url, datetime_to_microseconds(t5)))
t6 = microseconds_to_datetime(response.json["max_time"])
# should show the close action but not the message after it
self.assertEqual(len(response.json["results"]), 1)
self.assertEqual(response.json["results"][0]["type"], "A")
self.assertEqual(response.json["results"][0]["item"]["action"], "C")
# another look for new timeline activity
response = self.url_get("unicef", "%s?after=%s" % (timeline_url, datetime_to_microseconds(t6)))
# nothing to see
self.assertEqual(len(response.json["results"]), 0)
# user now refreshes page...
# backend has the message sent during the case as well as the unrelated message
mock_fetch_contact_messages.return_value = [
Outgoing(backend_broadcast_id=202, contact=self.ann, text="It's bad", created_on=d3),
remote_message1,
]
# which requests all of the timeline up to now
response = self.url_get("unicef", "%s?after=" % timeline_url)
items = response.json["results"]
self.assertEqual(len(items), 7)
self.assertEqual(items[0]["type"], "I")
self.assertEqual(items[0]["item"]["text"], "What is AIDS?")
self.assertEqual(items[0]["item"]["contact"], {"id": self.ann.pk, "display": "Ann"})
self.assertEqual(items[1]["type"], "O")
self.assertEqual(items[1]["item"]["text"], "Non casepro message...")
self.assertEqual(items[1]["item"]["contact"], {"id": self.ann.pk, "display": "Ann"})
self.assertEqual(items[1]["item"]["sender"], None)
self.assertEqual(items[2]["type"], "A")
self.assertEqual(items[2]["item"]["action"], "O")
self.assertEqual(items[3]["type"], "A")
self.assertEqual(items[3]["item"]["action"], "N")
self.assertEqual(items[4]["type"], "O")
self.assertEqual(items[4]["item"]["sender"], {"id": self.user1.pk, "name": "Evan"})
self.assertEqual(items[5]["type"], "I")
self.assertEqual(items[5]["item"]["text"], "OK thanks")
self.assertEqual(items[6]["type"], "A")
self.assertEqual(items[6]["item"]["action"], "C")
# as this was the initial request, messages will have been fetched from the backend
mock_fetch_contact_messages.assert_called_once_with(self.unicef, self.ann, d1, case.closed_on)
mock_fetch_contact_messages.reset_mock()
def test_timeline_no_initial_message(self):
"""
If a case has no initial message, the timeline should start from the datetime it was opened.
"""
case = self.create_case(self.unicef, self.ann, self.moh, message=None, user_assignee=self.user1)
caseaction = CaseAction.create(case, self.user1, CaseAction.OPEN, assignee=self.moh, user_assignee=self.user1)
timeline_url = reverse("cases.case_timeline", args=[case.pk])
self.login(self.user1)
response = self.url_get("unicef", "%s?after=" % timeline_url)
[case_open] = response.json["results"]
self.assertEqual(case_open["item"]["action"], CaseAction.OPEN)
self.assertEqual(case_open["item"]["id"], caseaction.pk)
def test_search(self):
url = reverse("cases.case_search")
# create another case
msg2 = self.create_message(self.unicef, 102, self.ann, "I ♡ RapidPro")
case2 = self.create_case(self.unicef, self.ann, self.who, msg2)
# try unauthenticated
response = self.url_get("unicef", url)
self.assertLoginRedirect(response, url)
# test as org administrator
self.login(self.admin)
response = self.url_get("unicef", url, {"folder": "open"})
self.assertEqual(
response.json["results"],
[
{
"id": case2.pk,
"assignee": {"id": self.who.pk, "name": "WHO"},
"user_assignee": None,
"contact": {"id": self.ann.pk, "display": "Ann"},
"labels": [],
"summary": "",
"opened_on": format_iso8601(case2.opened_on),
"is_closed": False,
},
{
"id": self.case.pk,
"assignee": {"id": self.moh.pk, "name": "MOH"},
"user_assignee": {"id": self.user1.pk, "name": "Evan"},
"contact": {"id": self.ann.pk, "display": "Ann"},
"labels": [{"id": self.aids.pk, "name": "AIDS"}],
"summary": "Summary",
"opened_on": format_iso8601(self.case.opened_on),
"is_closed": False,
},
],
)
# test as partner user
self.login(self.user1)
response = self.url_get("unicef", url, {"folder": "open"})
self.assertEqual(
response.json["results"],
[
{
"id": self.case.pk,
"assignee": {"id": self.moh.pk, "name": "MOH"},
"user_assignee": {"id": self.user1.pk, "name": "Evan"},
"contact": {"id": self.ann.pk, "display": "Ann"},
"labels": [{"id": self.aids.pk, "name": "AIDS"}],
"summary": "Summary",
"opened_on": format_iso8601(self.case.opened_on),
"is_closed": False,
}
],
)
def test_watch_and_unwatch(self):
watch_url = reverse("cases.case_watch", args=[self.case.pk])
unwatch_url = reverse("cases.case_unwatch", args=[self.case.pk])
# log in as manager user in currently assigned partner
self.login(self.user1)
response = self.url_post("unicef", watch_url)
self.assertEqual(response.status_code, 204)
self.assertIn(self.user1, self.case.watchers.all())
response = self.url_post("unicef", unwatch_url)
self.assertEqual(response.status_code, 204)
self.assertNotIn(self.user1, self.case.watchers.all())
# only user with case access can watch
self.who.labels.remove(self.aids)
self.login(self.user3)
response = self.url_post("unicef", watch_url)
self.assertEqual(response.status_code, 403)
self.assertNotIn(self.user3, self.case.watchers.all())
class CaseExportCRUDLTest(BaseCasesTest):
@override_settings(CELERY_TASK_ALWAYS_EAGER=True, CELERY_TASK_EAGER_PROPAGATES=True)
def test_create_and_read(self):
ann = self.create_contact(
self.unicef, "C-001", "Ann", fields={"nickname": "Annie", "age": "28", "state": "WA"}
)
bob = self.create_contact(self.unicef, "C-002", "Bob", fields={"age": "32", "state": "IN"})
cat = self.create_contact(self.unicef, "C-003", "Cat", fields={"age": "64", "state": "CA"})
don = self.create_contact(self.unicef, "C-004", "Don", fields={"age": "22", "state": "NV"})
msg1 = self.create_message(self.unicef, 101, ann, "What is HIV?")
msg2 = self.create_message(self.unicef, 102, bob, "I ♡ RapidPro")
msg3 = self.create_message(self.unicef, 103, cat, "Hello")
msg4 = self.create_message(self.unicef, 104, don, "Yo")
case1 = self.create_case(self.unicef, ann, self.moh, msg1, [self.aids], summary="What is HIV?")
case2 = self.create_case(self.unicef, bob, self.who, msg2, [self.pregnancy], summary="I ♡ RapidPro")
self.create_case(self.unicef, cat, self.who, msg3, [], summary="Hello")
case4 = self.create_case(self.unicef, don, self.moh, msg4, [])
case4.close(self.user1)
# add some messages to first case
self.create_outgoing(self.unicef, self.user1, 201, Outgoing.CASE_REPLY, "Good question", ann, case=case1)
self.create_message(self.unicef, 105, ann, "I know", case=case1)
self.create_outgoing(self.unicef, self.user1, 202, Outgoing.CASE_REPLY, "It's bad", ann, case=case1)
self.create_message(self.unicef, 106, ann, "Ok", case=case1)
self.create_message(self.unicef, 107, ann, "U-Report rocks!", case=case1)
# log in as a non-administrator
self.login(self.user1)
response = self.url_post("unicef", "%s?folder=open" % reverse("cases.caseexport_create"))
self.assertEqual(response.status_code, 200)
export = CaseExport.objects.get()
self.assertEqual(export.created_by, self.user1)
workbook = self.openWorkbook(export.filename)
sheet = workbook.sheets()[0]
self.assertEqual(sheet.nrows, 3)
self.assertExcelRow(
sheet,
0,
[
"Message On",
"Opened On",
"Closed On",
"Assigned Partner",
"Labels",
"Summary",
"Messages Sent",
"Messages Received",
"Contact",
"Nickname",
"Age",
],
)
self.assertExcelRow(
sheet,
1,
[msg2.created_on, case2.opened_on, "", "WHO", "Pregnancy", "I ♡ RapidPro", 0, 0, "C-002", "", "32"],
pytz.UTC,
)
self.assertExcelRow(
sheet,
2,
[msg1.created_on, case1.opened_on, "", "MOH", "AIDS", "What is HIV?", 2, 3, "C-001", "Annie", "28"],
pytz.UTC,
)
read_url = reverse("cases.caseexport_read", args=[export.pk])
response = self.url_get("unicef", read_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["download_url"], "/caseexport/download/%d/?download=1" % export.pk)
# user from another org can't access this download
self.login(self.norbert)
response = self.url_get("unicef", read_url)
self.assertEqual(response.status_code, 302)
@override_settings(CELERY_TASK_ALWAYS_EAGER=True, CELERY_TASK_EAGER_PROPAGATES=True)
def test_create_with_no_initial_message(self):
"""When a case is exported with initial_message=None, the field should be a blank string."""
ann = self.create_contact(self.unicef, "C-001", "Ann")
case = self.create_case(self.unicef, ann, self.moh, None, [self.aids], summary="What is HIV?")
self.login(self.user1)
self.url_post("unicef", "%s?folder=open" % reverse("cases.caseexport_create"))
export = CaseExport.objects.get()
workbook = self.openWorkbook(export.filename)
sheet = workbook.sheets()[0]
self.assertExcelRow(
sheet,
0,
[
"Message On",
"Opened On",
"Closed On",
"Assigned Partner",
"Labels",
"Summary",
"Messages Sent",
"Messages Received",
"Contact",
"Nickname",
"Age",
],
)
self.assertExcelRow(
sheet,
1,
["", case.opened_on, "", self.moh.name, self.aids.name, "What is HIV?", 0, 0, ann.uuid, "", ""],
pytz.UTC,
)
class InboxViewsTest(BaseCasesTest):
def test_inbox(self):
url = reverse("cases.inbox")
response = self.url_get("unicef", url)
self.assertLoginRedirect(response, url)
# log in as administrator
self.login(self.admin)
response = self.url_get("unicef", url)
self.assertContains(response, "Administration") # org-level users admin menu
self.assertContains(response, "/org/home/") # and link to org dashboard
# log in as partner manager
self.login(self.user1)
response = self.url_get("unicef", url)
self.assertNotContains(response, "Administration")
self.assertNotContains(response, "/org/home/")
self.assertContains(response, "/partner/read/%d/" % self.moh.pk) # partner users get link to partner dashboard
self.assertContains(response, "Message Board")
self.assertContains(response, "/messageboard/")
class PartnerTest(BaseCasesTest):
def test_create(self):
wfp = Partner.create(self.unicef, "WFP", "World Food Program", None, True, [self.aids, self.pregnancy])
self.assertEqual(wfp.org, self.unicef)
self.assertEqual(wfp.name, "WFP")
self.assertEqual(str(wfp), "WFP")
self.assertEqual(set(wfp.get_labels()), {self.aids, self.pregnancy})
# create some users for this partner
jim = self.create_user(self.unicef, wfp, ROLE_MANAGER, "Jim", "jim@wfp.org")
kim = self.create_user(self.unicef, wfp, ROLE_ANALYST, "Kim", "kim@wfp.org")
self.assertEqual(set(wfp.get_users()), {jim, kim})
self.assertEqual(set(wfp.get_managers()), {jim})
self.assertEqual(set(wfp.get_analysts()), {kim})
# set kim as the primary contact for the wfp partner
wfp.primary_contact = kim
wfp.save()
self.assertEqual(wfp.primary_contact, kim)
# create a partner which is not restricted by labels
internal = Partner.create(self.unicef, "Internal", "Internal Description", None, False, [])
self.assertEqual(set(internal.get_labels()), {self.aids, self.pregnancy, self.tea})
# can't create an unrestricted partner with labels
self.assertRaises(
ValueError, Partner.create, self.unicef, "Testers", "Testers Description", None, False, [self.aids]
)
def test_release(self):
self.who.release()
self.assertFalse(self.who.is_active)
self.assertIsNone(User.objects.get(pk=self.user3.pk).get_partner(self.unicef)) # user will have been detached
class PartnerCRUDLTest(BaseCasesTest):
def test_create(self):
url = reverse("cases.partner_create")
# can't access as partner user
self.login(self.user1)
response = self.url_get("unicef", url)
self.assertLoginRedirect(response, url)
self.login(self.admin)
response = self.url_get("unicef", url)
self.assertEqual(response.status_code, 200)
self.assertEqual(
list(response.context["form"].fields.keys()),
["name", "description", "logo", "is_restricted", "labels", "loc"],
)
# create label restricted partner
response = self.url_post(
"unicef",
url,
{
"name": "Helpers",
"description": "Helpers Description",
"logo": "",
"is_restricted": True,
"labels": [self.tea.pk],
},
)
helpers = Partner.objects.get(name="Helpers")
self.assertRedirects(response, "/partner/read/%d/" % helpers.pk, fetch_redirect_response=False)
self.assertTrue(helpers.is_restricted)
self.assertEqual(set(helpers.get_labels()), {self.tea})
self.assertEqual(helpers.description, "Helpers Description")
self.assertEqual(helpers.primary_contact, None)
# create unrestricted partner
response = self.url_post(
"unicef", url, {"name": "Internal", "logo": "", "is_restricted": False, "labels": [self.tea.pk]}
)
self.assertEqual(response.status_code, 302)
internal = Partner.objects.get(name="Internal")
self.assertFalse(internal.is_restricted)
self.assertEqual(set(internal.labels.all()), set()) # submitted labels are ignored
self.assertEqual(set(internal.get_labels()), {self.aids, self.pregnancy, self.tea})
self.assertEqual(internal.description, "")
# remove all labels and check that form is still usable
Label.objects.all().delete()
response = self.url_get("unicef", url)
self.assertEqual(response.status_code, 200)
response = self.url_post(
"unicef", url, {"name": "Labelless", "description": "No labels", "logo": "", "is_restricted": True}
)
self.assertEqual(response.status_code, 302)
Partner.objects.get(name="Labelless")
def test_read(self):
url = reverse("cases.partner_read", args=[self.moh.pk])
# manager user from same partner gets full view of their own partner org
self.login(self.user1)
response = self.url_get("unicef", url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["can_manage"], True)
self.assertEqual(response.context["can_view_replies"], True)
        # data-analyst user from same partner can view replies but can't manage users
self.login(self.user2)
response = self.url_get("unicef", url)
self.assertEqual(response.context["can_manage"], False)
self.assertEqual(response.context["can_view_replies"], True)
# user from different partner but same org has limited view
self.login(self.user3)
response = self.url_get("unicef", url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["can_manage"], False)
self.assertEqual(response.context["can_view_replies"], False)
# user from different org can't
self.login(self.user4)
response = self.url_get("unicef", url)
self.assertLoginRedirect(response, url)
def test_update(self):
url = reverse("cases.partner_update", args=[self.moh.pk])
# login as analyst user
self.login(self.user2)
response = self.url_get("unicef", url)
self.assertLoginRedirect(response, url)
# login as manager user
self.login(self.user1)
# get update page
response = self.url_get("unicef", url)
self.assertEqual(response.status_code, 200)
self.assertEqual(
list(response.context["form"].fields.keys()),
["name", "description", "primary_contact", "logo", "is_restricted", "labels", "loc"],
)
# post update without name field
response = self.url_post("unicef", url)
self.assertFormError(response, "form", "name", "This field is required.")
# post name change
response = self.url_post("unicef", url, {"name": "MOH2"})
self.assertRedirects(response, "/partner/read/%d/" % self.moh.pk, fetch_redirect_response=False)
moh = Partner.objects.get(pk=self.moh.pk)
self.assertEqual(moh.name, "MOH2")
# post primary contact change
response = self.url_post("unicef", url, {"name": "MOH", "primary_contact": self.user1.pk})
self.assertRedirects(response, "/partner/read/%d/" % self.moh.pk, fetch_redirect_response=False)
moh = Partner.objects.get(pk=self.moh.pk)
self.assertEqual(moh.primary_contact, self.user1)
def test_delete(self):
url = reverse("cases.partner_delete", args=[self.moh.pk])
# try first as manager (not allowed)
self.login(self.user1)
response = self.url_post("unicef", url)
self.assertLoginRedirect(response, url)
self.assertTrue(Partner.objects.get(pk=self.moh.pk).is_active)
# try again as administrator
self.login(self.admin)
response = self.url_post("unicef", url)
self.assertEqual(response.status_code, 204)
self.assertFalse(Partner.objects.get(pk=self.moh.pk).is_active)
def test_list(self):
url = reverse("cases.partner_list")
# try as regular user
self.login(self.user2)
response = self.url_get("unicef", url)
self.assertEqual(response.status_code, 200)
partners = list(response.context["object_list"])
self.assertEqual(len(partners), 2)
self.assertEqual(partners[0].name, "MOH")
self.assertEqual(partners[1].name, "WHO")
response = self.url_get("unicef", url, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(
response.json,
{
"results": [
{"id": self.moh.pk, "name": "MOH", "restricted": True},
{"id": self.who.pk, "name": "WHO", "restricted": True},
]
},
)
response = self.url_get("unicef", url + "?with_activity=1", HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(
response.json,
{
"results": [
{
"id": self.moh.pk,
"name": "MOH",
"restricted": True,
"replies": {
"average_referral_response_time_this_month": "0\xa0minutes",
"last_month": 0,
"this_month": 0,
"total": 0,
},
"cases": {
"average_closed_this_month": "0\xa0minutes",
"opened_this_month": 0,
"closed_this_month": 0,
"total": 0,
},
},
{
"id": self.who.pk,
"name": "WHO",
"restricted": True,
"replies": {
"average_referral_response_time_this_month": "0\xa0minutes",
"last_month": 0,
"this_month": 0,
"total": 0,
},
"cases": {
"average_closed_this_month": "0\xa0minutes",
"opened_this_month": 0,
"closed_this_month": 0,
"total": 0,
},
},
]
},
)
class ContextProcessorsTest(BaseCasesTest):
def test_sentry_dsn(self):
dsn = "https://ir78h8v3mhz91lzgd2icxzaiwtmpsx10:58l883tax2o5cae05bj517f9xmq16a2h@app.getsentry.com/44864"
with self.settings(SENTRY_DSN=dsn):
self.assertEqual(
sentry_dsn(None),
{"sentry_public_dsn": "https://ir78h8v3mhz91lzgd2icxzaiwtmpsx10@app.getsentry.com/44864"},
)
class InternalViewsTest(BaseCasesTest):
def test_status(self):
url = reverse("internal.status")
response = self.url_get("unicef", url)
self.assertEqual(response.json, {"cache": "OK", "org_tasks": "OK", "unhandled": 0})
ann = self.create_contact(self.unicef, "C-001", "Ann")
dt1 = timezone.now() - timedelta(hours=2)
dt2 = timezone.now() - timedelta(minutes=5)
self.create_message(self.unicef, 101, ann, "Hmm 1", created_on=dt1)
self.create_message(self.unicef, 102, ann, "Hmm 2", created_on=dt2)
response = self.url_get("unicef", url)
        # check that only the message older than 1 hour counts as unhandled
self.assertEqual(response.json, {"cache": "OK", "org_tasks": "OK", "unhandled": 1})
with patch("django.core.cache.cache.get") as mock_cache_get:
mock_cache_get.side_effect = ValueError("BOOM")
response = self.url_get("unicef", url)
self.assertEqual(response.json, {"cache": "ERROR", "org_tasks": "OK", "unhandled": 1})
def test_ping(self):
url = reverse("internal.ping")
response = self.url_get("unicef", url)
self.assertEqual(response.status_code, 200)
with patch("dash.orgs.models.Org.objects.first") as mock_org_first:
mock_org_first.side_effect = ValueError("BOOM")
response = self.url_get("unicef", url)
self.assertEqual(response.status_code, 500)
| 42.526612 | 119 | 0.626386 |
d47e5a58fc90cc5165e763589139231ea7d9df8b | 787 | py | Python | python/ccxt/async/urdubit.py | hippylover/ccxt | db304e95b699c1971ad37b9053ae71fcb5dc3b03 | ["MIT"] | 2 | 2018-03-08T20:17:22.000Z | 2021-06-01T23:36:26.000Z | python/ccxt/async/urdubit.py | August-Ghost/ccxt | 886c596ffde611b5a92cb5b6e3788ff010324c74 | ["MIT"] | null | null | null | python/ccxt/async/urdubit.py | August-Ghost/ccxt | 886c596ffde611b5a92cb5b6e3788ff010324c74 | ["MIT"] | 9 | 2018-02-20T18:24:00.000Z | 2019-06-18T14:23:11.000Z |

# -*- coding: utf-8 -*-
from ccxt.async.foxbit import foxbit
class urdubit (foxbit):
def describe(self):
return self.deep_extend(super(urdubit, self).describe(), {
'id': 'urdubit',
'name': 'UrduBit',
'countries': 'PK',
'has': {
'CORS': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27991453-156bf3ae-6480-11e7-82eb-7295fe1b5bb4.jpg',
'api': {
'public': 'https://api.blinktrade.com/api',
'private': 'https://api.blinktrade.com/tapi',
},
'www': 'https://urdubit.com',
'doc': 'https://blinktrade.com/docs',
},
})
| 30.269231 | 126 | 0.461245 |
4c925bf4b2ab04ab5a92c13191e98c36c8185275 | 6,009 | py | Python | apps/users/migrations/0001_initial.py | fga-gpp-mds/2017.2-Grupo4 | e7cd2114ed46da879700f6163594d57e7797e367 | ["MIT"] | 7 | 2017-08-22T19:27:25.000Z | 2017-12-09T18:17:40.000Z | apps/users/migrations/0001_initial.py | fga-gpp-mds/2017.2-Grupo4 | e7cd2114ed46da879700f6163594d57e7797e367 | ["MIT"] | 89 | 2017-09-20T03:22:49.000Z | 2017-12-11T18:50:25.000Z | apps/users/migrations/0001_initial.py | fga-gpp-mds/2017.2-Grupo4 | e7cd2114ed46da879700f6163594d57e7797e367 | ["MIT"] | 1 | 2017-09-26T04:15:49.000Z | 2017-09-26T04:15:49.000Z |

# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-13 01:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Staff',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('name', models.CharField(max_length=150, verbose_name='Nome')),
('id_user', models.CharField(max_length=150, unique=True, verbose_name='ID de usuário')),
('email', models.EmailField(default='', max_length=254, unique=True, verbose_name='Email do usuário')),
('profile', models.IntegerField(choices=[(1, 'receptionista'), (2, 'Atendente')], default=0, verbose_name='Perfil')),
('cep', models.CharField(default='', max_length=10, verbose_name='CEP')),
('uf', models.CharField(choices=[('AC', 'Acre'), ('AL', 'Alagoas'), ('AP', 'Amapá'), ('AM', 'Amazonas'), ('BA', 'Bahia'), ('CE', 'Ceará'), ('DF', 'Distrito Federal'), ('ES', 'Espírito Santo'), ('GO', 'Goiás'), ('MA', 'Maranhão'), ('MT', 'Mato Grosso'), ('MS', 'Mato Grosso do Sul'), ('MG', 'Minas Gerais'), ('PA', 'Pará'), ('PB', 'Paraíba'), ('PR', 'Paraná'), ('PE', 'Pernambuco'), ('PI', 'Piauí'), ('RJ', 'Rio de Janeiro'), ('RN', 'Rio Grande do Norte'), ('RS', 'Rio Grande do Sul'), ('RO', 'Rondônia'), ('RR', 'Roraima'), ('SC', 'Santa Catarina'), ('SP', 'São Paulo'), ('SE', 'Sergipe'), ('TO', 'Tocantins')], max_length=2, verbose_name='UF')),
('city', models.CharField(max_length=50, verbose_name='Cidade')),
('neighborhood', models.CharField(max_length=100, verbose_name='Bairro')),
('street', models.CharField(max_length=50, verbose_name='Rua')),
('block', models.CharField(max_length=50, verbose_name='Conjunto')),
('number', models.CharField(max_length=10, verbose_name='Numero')),
('is_active', models.BooleanField(default=True)),
('is_admin', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Patient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('name', models.CharField(blank=True, default='', max_length=150, verbose_name='Nome')),
('comment_receptionist', models.CharField(blank=True, default='', max_length=300, verbose_name='Comentário do receptionista')),
('classifier_id', models.CharField(blank=True, default='', max_length=150, verbose_name='ID do Classificador')),
('guardian', models.CharField(blank=True, default='', help_text='Informe o nome do responsável', max_length=50, verbose_name='Nome do Responsável')),
('birth_date', models.DateField(blank=True, help_text='Informe a data de Nascimento', null=True, verbose_name='Data de Nascimento')),
('cpf', models.CharField(blank=True, help_text='Informe o CPF', max_length=14, null=True, unique=True, verbose_name='CPF')),
('parents_name', models.CharField(blank=True, default='', help_text='Informe o nome dos pais', max_length=150, verbose_name='Nome dos pais')),
('cep', models.CharField(blank=True, default='', help_text='Informe o CEP', max_length=10, null=True, verbose_name='CEP')),
('uf', models.CharField(blank=True, choices=[('AC', 'Acre'), ('AL', 'Alagoas'), ('AP', 'Amapá'), ('AM', 'Amazonas'), ('BA', 'Bahia'), ('CE', 'Ceará'), ('DF', 'Distrito Federal'), ('ES', 'Espírito Santo'), ('GO', 'Goiás'), ('MA', 'Maranhão'), ('MT', 'Mato Grosso'), ('MS', 'Mato Grosso do Sul'), ('MG', 'Minas Gerais'), ('PA', 'Pará'), ('PB', 'Paraíba'), ('PR', 'Paraná'), ('PE', 'Pernambuco'), ('PI', 'Piauí'), ('RJ', 'Rio de Janeiro'), ('RN', 'Rio Grande do Norte'), ('RS', 'Rio Grande do Sul'), ('RO', 'Rondônia'), ('RR', 'Roraima'), ('SC', 'Santa Catarina'), ('SP', 'São Paulo'), ('SE', 'Sergipe'), ('TO', 'Tocantins')], default='', max_length=2, verbose_name='UF')),
('city', models.CharField(blank=True, default='', max_length=50, verbose_name='Cidade')),
('neighborhood', models.CharField(blank=True, default='', max_length=100, verbose_name='Bairro')),
('street', models.CharField(blank=True, default='', max_length=50, verbose_name='Rua')),
('block', models.CharField(blank=True, default='', max_length=50, verbose_name='Conjunto')),
('number', models.CharField(blank=True, default='', max_length=10, verbose_name='Numero')),
('date', models.DateField(auto_now=True, verbose_name='Data')),
('classification', models.IntegerField(choices=[(0, 'Não classificado'), (1, 'Atendimento Imediato'), (2, 'Atendimento Hospitalar'), (3, 'Atendimento Ambulatorial'), (4, 'Atendimento Eletivo')], default=0, verbose_name='Classification')),
('gender', models.IntegerField(blank=True, choices=[(0, 'Sexo indefinido'), (1, 'Feminino'), (2, 'Masculino')], default=0, verbose_name='Genero')),
('age_range', models.IntegerField(choices=[(0, 'Faixa etária indefinida'), (1, '0 até 28 dias'), (2, '29 dias à 2 meses'), (3, '2 meses à 3 anos'), (4, '3 anos à 10 anos'), (5, 'Acima de 10 anos')], default=0, verbose_name='Faixa etária')),
('age', models.CharField(blank=True, max_length=50, verbose_name='Age')),
],
),
]
| 89.686567 | 686 | 0.594608 |
52e9f64de8c5205cbf59c33e1e1211eab970bd9c | 3,088 | py | Python | diagnostics/diagnostic_common_diagnostics/src/diagnostic_common_diagnostics/cpu_monitor.py | zhj-buffer/ROS2-driver-for-Realsense | 936cf27be4e7dc3d699ff99499e72ea8638cc622 | ["Apache-2.0"] | 2 | 2021-07-14T12:33:55.000Z | 2021-11-21T07:14:13.000Z | melodic/src/diagnostics/diagnostic_common_diagnostics/src/diagnostic_common_diagnostics/cpu_monitor.py | disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA | 3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0 | ["BSD-3-Clause"] | null | null | null | melodic/src/diagnostics/diagnostic_common_diagnostics/src/diagnostic_common_diagnostics/cpu_monitor.py | disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA | 3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0 | ["BSD-3-Clause"] | null | null | null |

#!/usr/bin/env python
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2017, TNO IVS, Helmond, Netherlands
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the TNO IVS nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# \author Rein Appeldoorn
import rospy
from diagnostic_updater import DiagnosticTask, Updater
from diagnostic_msgs.msg import DiagnosticStatus
import psutil
import socket
class CpuTask(DiagnosticTask):
def __init__(self, warning_percentage):
DiagnosticTask.__init__(self, "CPU Information")
self._warning_percentage = int(warning_percentage)
def run(self, stat):
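        # psutil.cpu_percent(percpu=True) returns one utilisation percentage per
        # logical CPU, measured since the previous call.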
cpu_percentages = psutil.cpu_percent(percpu=True)
cpu_average = sum(cpu_percentages) / len(cpu_percentages)
stat.add("CPU Load Average", cpu_average)
warn = False
for idx, val in enumerate(cpu_percentages):
stat.add("CPU {} Load".format(idx), "{}".format(val))
if val > self._warning_percentage:
warn = True
if warn:
stat.summary(DiagnosticStatus.WARN, "At least one CPU exceeds %d percent" % self._warning_percentage)
else:
stat.summary(DiagnosticStatus.OK, "CPU Average %.1f percent" % cpu_average)
return stat
def main():
hostname = socket.gethostname()
rospy.init_node('cpu_monitor_%s' % hostname.replace("-", "_"))
updater = Updater()
updater.setHardwareID(hostname)
updater.add(CpuTask(rospy.get_param("~warning_percentage", 90)))
rate = rospy.Rate(rospy.get_param("~rate", 1))
while not rospy.is_shutdown():
rate.sleep()
updater.update()
if __name__ == '__main__':
main()
| 35.906977 | 113 | 0.721503 |
5e322b4ba5e47fba979d7177c98577042658e6f8 | 9,223 | py | Python | api/actions/views.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | ["Apache-2.0"] | null | null | null | api/actions/views.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | ["Apache-2.0"] | 5 | 2017-02-13T19:38:59.000Z | 2018-10-17T20:38:08.000Z | api/actions/views.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | ["Apache-2.0"] | null | null | null |

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import get_object_or_404
from guardian.shortcuts import get_objects_for_user
from rest_framework import generics
from rest_framework import permissions
from rest_framework.exceptions import NotFound, PermissionDenied
from api.actions.permissions import ReviewActionPermission
from api.actions.serializers import NodeRequestActionSerializer, ReviewActionSerializer, PreprintRequestActionSerializer
from api.base.exceptions import Conflict
from api.base.filters import ListFilterMixin
from api.base.views import JSONAPIBaseView
from api.base.parsers import (
JSONAPIMultipleRelationshipsParser,
JSONAPIMultipleRelationshipsParserForRegularJSON,
)
from api.base import permissions as base_permissions
from api.base.utils import absolute_reverse
from api.requests.views import NodeRequestMixin, PreprintRequestMixin
from api.requests.permissions import NodeRequestPermission, PreprintRequestPermission
from framework.auth.oauth_scopes import CoreScopes
from osf.models import PreprintProvider, ReviewAction, NodeRequestAction, PreprintRequestAction
def get_review_actions_queryset():
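    # Load related creator/target/provider data up front and exclude soft-deleted actions.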
return ReviewAction.objects.include(
'creator__guids',
'target__guids',
'target__provider',
).filter(is_deleted=False)
class ActionDetail(JSONAPIBaseView, generics.RetrieveAPIView):
"""Action Detail
Actions represent state changes and/or comments on any actionable object (e.g. preprints, noderequests)
##Action Attributes
name type description
====================================================================================
date_created iso8601 timestamp timestamp that the action was created
date_modified iso8601 timestamp timestamp that the action was last modified
from_state string state of the reviewable before this action was created
to_state string state of the reviewable after this action was created
comment string comment explaining the state change
trigger string name of the trigger for this action
##Relationships
###Target
Link to the object (e.g. preprint) this action acts on
###Provider
Link to detail for the target object's provider
###Creator
Link to the user that created this action
##Links
- `self` -- Detail page for the current action
"""
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReviewActionPermission,
# TODO: Consider generic "ActionPermission" when RequestActions are viewable
)
required_read_scopes = [CoreScopes.ACTIONS_READ]
required_write_scopes = [CoreScopes.ACTIONS_WRITE]
serializer_class = ReviewActionSerializer
view_category = 'actions'
view_name = 'action-detail'
def get_serializer_class(self):
# Not allowed to view NodeRequestActions yet, making extra logic unnecessary
return ReviewActionSerializer
def get_object(self):
action = None
if ReviewAction.objects.filter(_id=self.kwargs['action_id']).exists():
action = get_object_or_404(get_review_actions_queryset(), _id=self.kwargs['action_id'])
elif NodeRequestAction.objects.filter(_id=self.kwargs['action_id']).exists() or PreprintRequestAction.objects.filter(_id=self.kwargs['action_id']).exists():
# No permissions allow for viewing RequestActions yet
raise PermissionDenied('You do not have permission to view this Action')
if not action:
raise NotFound('Unable to find specified Action')
self.check_object_permissions(self.request, action)
return action
class ReviewActionListCreate(JSONAPIBaseView, generics.ListCreateAPIView, ListFilterMixin):
"""List of review actions viewable by this user
Actions represent state changes and/or comments on a reviewable object (e.g. a preprint)
##Action Attributes
name type description
====================================================================================
date_created iso8601 timestamp timestamp that the action was created
date_modified iso8601 timestamp timestamp that the action was last modified
from_state string state of the reviewable before this action was created
to_state string state of the reviewable after this action was created
comment string comment explaining the state change
trigger string name of the trigger for this action
##Relationships
###Target
Link to the object (e.g. preprint) this action acts on
###Provider
Link to detail for the target object's provider
###Creator
Link to the user that created this action
##Links
- `self` -- Detail page for the current action
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Actions may be filtered by their `id`, `from_state`, `to_state`, `date_created`, `date_modified`, `creator`, `provider`, `target`
"""
    # Permissions handled in get_default_queryset
permission_classes = (
permissions.IsAuthenticated,
base_permissions.TokenHasScope,
ReviewActionPermission,
)
required_read_scopes = [CoreScopes.ACTIONS_READ]
required_write_scopes = [CoreScopes.NULL]
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
serializer_class = ReviewActionSerializer
model_class = ReviewAction
ordering = ('-created',)
view_category = 'actions'
view_name = 'review-action-list'
# overrides ListCreateAPIView
def perform_create(self, serializer):
target = serializer.validated_data['target']
self.check_object_permissions(self.request, target)
if not target.provider.is_reviewed:
raise Conflict('{} is an unmoderated provider. If you are an admin, set up moderation by setting `reviews_workflow` at {}'.format(
target.provider.name,
absolute_reverse('providers:preprint-providers:preprint-provider-detail', kwargs={
'provider_id': target.provider._id,
'version': self.request.parser_context['kwargs']['version']
})
))
serializer.save(user=self.request.user)
# overrides ListFilterMixin
def get_default_queryset(self):
provider_queryset = get_objects_for_user(self.request.user, 'view_actions', PreprintProvider)
return get_review_actions_queryset().filter(target__node__is_public=True, target__provider__in=provider_queryset)
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request()
class NodeRequestActionCreate(JSONAPIBaseView, generics.CreateAPIView, NodeRequestMixin):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
NodeRequestPermission
)
required_read_scopes = [CoreScopes.NULL]
required_write_scopes = [CoreScopes.ACTIONS_WRITE]
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
serializer_class = NodeRequestActionSerializer
view_category = 'request-actions'
view_name = 'create-node-request-action'
# overrides CreateAPIView
def perform_create(self, serializer):
target = serializer.validated_data['target']
self.check_object_permissions(self.request, target)
serializer.save(user=self.request.user)
class PreprintRequestActionCreate(JSONAPIBaseView, generics.CreateAPIView, PreprintRequestMixin):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PreprintRequestPermission
)
required_read_scopes = [CoreScopes.NULL]
required_write_scopes = [CoreScopes.ACTIONS_WRITE]
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
serializer_class = PreprintRequestActionSerializer
view_category = 'request-actions'
view_name = 'create-preprint-request-action'
# overrides CreateAPIView
def perform_create(self, serializer):
target = serializer.validated_data['target']
self.check_object_permissions(self.request, target)
serializer.save(user=self.request.user)
| 41.545045 | 164 | 0.67169 |
94d6bc5e4d55637a4d34ab73fc2177d2a35a1953 | 626 | py | Python | app/__init__.py | d3vzer0/streamio-api | f1ec2be8ee5d0006210df15b532ca93dbee8253e | ["MIT"] | null | null | null | app/__init__.py | d3vzer0/streamio-api | f1ec2be8ee5d0006210df15b532ca93dbee8253e | ["MIT"] | 1 | 2020-09-06T10:23:12.000Z | 2020-09-06T10:23:12.000Z | app/__init__.py | d3vzer0/streamio-api | f1ec2be8ee5d0006210df15b532ca93dbee8253e | ["MIT"] | null | null | null |

from flask import Flask
from flask_restful import Api, Resource
from flask_jwt_extended import JWTManager
from flask_cors import CORS
from flask_mongoengine import MongoEngine
from pykafka import KafkaClient
# Initialize Flask Instance and load configuration
app = Flask(__name__)
from app.configs import *
# Initialize the API, JWT, CORS, and MongoDB extensions
api = Api(app)
jwt = JWTManager(app)
cors = CORS(app, resources={r"*": {"origins": "*"}})
db = MongoEngine(app)
# Import views
from app import api_generic
from app import api_matches
from app import api_snapshots
from app import api_filters
from app import api_comparepage
| 26.083333 | 52 | 0.798722 |
3c9029bc19119c7a4cd87bf66abc843a0936c694 | 1,625 | py | Python | blog/bits/generators.py | 100stacks/100stacks.github.io | 4b08710530a4b20fdbc6dd65324fc46aaf3aee73 | ["Unlicense"] | null | null | null | blog/bits/generators.py | 100stacks/100stacks.github.io | 4b08710530a4b20fdbc6dd65324fc46aaf3aee73 | ["Unlicense"] | 2 | 2019-08-11T10:11:34.000Z | 2019-08-11T20:15:17.000Z | blog/bits/generators.py | 100stacks/100stacks.github.io | 4b08710530a4b20fdbc6dd65324fc46aaf3aee73 | ["Unlicense"] | 1 | 2016-07-21T20:37:15.000Z | 2016-07-21T20:37:15.000Z |

# Python Generators - How, Why, and When to use them
# https://www.youtube.com/watch?v=bD05uGo_sVI
## Simple List
def square_numbers(nums):
result = []
for i in nums:
result.append(i*i)
return result
sample = [1,2,3,4,5]
#print(square_numbers(sample)) # outputs [1, 4, 9, 16, 25]
# Now using Generators
def square_generator(nums):
for i in nums:
yield (i*i)
g = square_generator(sample)
print(square_generator(sample)) # returns a generator object, which does not return all the results at once.
# It YIELDS one result at a time. At this point no results have been computed yet.
print(next(g)) # Syntax for Python 3.x is different from Python 2.7.x
print(next(g))
print(next(g))
print(next(g))
print(next(g)) # Prints 25, for the LAST value in the LIST.
try:
    print(next(g)) # The LIST is exhausted, so the generator raises a `StopIteration` error
except StopIteration:
    print("Generator exhausted - StopIteration raised")
# Below shows how we can iterate through a generator object with a for loop
g = square_generator(sample) # recreate the generator - the one above is already exhausted
for num in g:
    print(num)
## Looping over the same generator again prints nothing - it was exhausted by the loop above
print("Another loop through the exhausted generator object:")
for num in g:
    print(num)
## List Comprehension
print("Using List Comprehension:")
sample2 = [x*x for x in [2,3,5,7,9]]
print(sample2)
## A generator expression - shortcut syntax for creating a generator
print("Generator using shortcut annotation:")
sample3 = (x*x for x in [2,3,5,7,9])
print(sample3) # print the generator object
for num in sample3:
print(num) # prints 4, 9, 25, 49, 81
## To print Generator as List
sample3 = (x*x for x in [2,3,5,7,9])
print(sample3) # print the generator object
print (list(sample3))
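
## Why use a generator? It doesn't build the whole result list in memory.
## (Illustration - exact byte counts vary by Python version and platform.)
import sys
squares_list = [x*x for x in range(10000)] # fully materialized list
squares_gen = (x*x for x in range(10000)) # lazy generator expression
print("List size in bytes:", sys.getsizeof(squares_list))
print("Generator size in bytes:", sys.getsizeof(squares_gen))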
| 27.083333 | 106 | 0.677538 |
692923a25cb2a0989474d5917bc8776b7cd4d9fd | 17,749 | py | Python | otc_doc_convertor/convertor.py | opentelekomcloud-docs/doc-exports | 8ee7373ddf0010fc06333fd60cc93a93c6f68c07 | ["Apache-2.0"] | null | null | null | otc_doc_convertor/convertor.py | opentelekomcloud-docs/doc-exports | 8ee7373ddf0010fc06333fd60cc93a93c6f68c07 | ["Apache-2.0"] | null | null | null | otc_doc_convertor/convertor.py | opentelekomcloud-docs/doc-exports | 8ee7373ddf0010fc06333fd60cc93a93c6f68c07 | ["Apache-2.0"] | null | null | null |

#!/usr/bin/env python3
import argparse
import bs4
import json
import logging
import os
import pathlib
import re
import shutil
class OTCDocConvertor:
def __init__(self):
self.doc_anchors = dict()
self.doc_links = dict()
@staticmethod
def get_new_name(current_name):
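        # Build a filesystem-friendly name: replace separators with underscores,
        # strip quotes and punctuation, and lowercase the result.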
new_name = current_name.replace(' - ', '_')
new_name = new_name.replace(' ', '_')
new_name = new_name.replace('/', '_')
new_name = new_name.replace('\'', '')
new_name = new_name.replace('"', '')
new_name = new_name.replace('`', '')
new_name = new_name.replace('´', '')
new_name = new_name.replace(':', '')
new_name = new_name.replace('?', '')
new_name = new_name.replace('(', '')
new_name = new_name.replace(')', '')
new_name = new_name.lower()
return new_name
@staticmethod
def build_doc_tree(metadata):
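        # Group document metadata by parent code ("p_code"); entries without a
        # parent land in bucket 0, producing a flat parent -> children mapping.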
flat_tree = dict()
for k, v in metadata.items():
parent_id = v.get('p_code')
if not parent_id:
parent_id = 0
if parent_id not in flat_tree:
flat_tree[parent_id] = list()
flat_tree[parent_id].append(v)
return flat_tree
@classmethod
def get_target_path(cls, code, metadata, path=''):
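        # Recursively walk up the p_code chain to build the relative output path
        # of a document, e.g. "<parent_new_name>/<new_name>".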
if code in metadata:
current = metadata[code]
if not current.get('p_code'):
return current['new_name']
else:
return (
"{0}/{1}".format(
cls.get_target_path(current['p_code'], metadata),
current['new_name'])
)
else:
return ''
def make_label(self, soup, name):
label = soup.new_tag("p")
label.string = f"..\\_{name.lower()}:"
return label
def is_element_referred(self, ref, fname):
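        # Check whether any collected link points at this anchor, either as
        # "ref", "#ref" or "<file>#ref"; unreferenced anchors get dropped by callers.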
return (
ref in self.doc_links
or '#' + ref in self.doc_links
or fname + '#' + ref in self.doc_links
)
def streamline_html(self, soup, file_name):
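        # Normalize the exported HTML: drop duplicated anchors, rewrite
        # note/caution/figure/section divs, rebase image paths onto
        # /_static/images, and keep only anchors that are actually referenced.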
# Drop eventual header duplicated anchors
fname = file_name.replace(".html", "").lower()
met_page_anchors = dict()
for lnk in soup.body.find_all("a"):
name = None
if "name" in lnk.attrs and lnk.string is None:
name = lnk.attrs["name"].lower()
if name in met_page_anchors:
# Such anchor already existed on this page, drop it
lnk.decompose()
met_page_anchors[name] = True
if name and name.lower() == fname:
lnk.decompose()
# Process divs
for i in soup.body.find_all('div'):
if "note" in i.get('class', []):
# Notes
del i['id']
if i.img:
i.img.decompose()
notetitle = i.find('span', class_='notetitle')
if notetitle:
title = soup.new_tag('div')
title['class'] = 'title'
title.string = 'Note:'
notetitle.replace_with(title)
elif "notice" in i.get('class', []):
# Notices
del i['id']
if i.img:
i.img.decompose()
i['class'] = 'important'
elif "caution" in i.get('class', []):
# Cautions
del i['id']
if i.img:
i.img.decompose()
elif "fignone" in i.get('class', []):
# Figures
# When we found figure generate local label (anchor)
if i.get('id'):
logging.debug('place figure label')
i.insert_before(self.make_label(soup, i.get("id")))
figure = soup.new_tag('figure')
img = i.find('img')
cap = i.find('span', class_='figcap')
if cap is not None:
cap.name = 'figcaption'
figure.append(cap)
if img:
# Store all referred images for copying
self.doc_images.add(img['src'])
img['src'] = '/_static/images/' + img['src']
figure.append(img)
i.replace_with(figure)
elif "section" in i.get('class', []):
# Sections
# When we found section generate local label (anchor)
if i.get('id'):
sec_id = i.get("id").lower()
if self.is_element_referred(sec_id, file_name):
logging.debug('Add section label')
i.insert_before(self.make_label(soup, sec_id))
# and still convert to paragraph
i.name = 'p'
else:
i.name = 'p'
# Process remaining images
for img in soup.body.find_all('img'):
if img['src'] and not img['src'].startswith('/_static/images'):
self.doc_images.add(img['src'])
img['src'] = '/_static/images/' + img['src']
# Drop strong in table headers "/"
for th in soup.body.find_all('th'):
if th.p.strong:
th.p.strong.unwrap()
if self.args.improve_table_headers:
# Add spaces around "/"
for th in soup.body.find_all('th'):
if hasattr(th, 'p') and th.p.string:
th.p.string = re.sub(
r'\b/\b',
' / ',
th.p.string)
# local anchors
for lnk in soup.body.find_all("a"):
if (
lnk.string is None
and hasattr(lnk, "name")
and not re.match(r"^li\d+$", lnk.attrs["name"])
# anywhere section
and not re.match(r".*section\d+$", lnk.attrs["name"])
# starts with table
and not re.match(r"^table\d+$", lnk.attrs["name"])
):
# Verify this is really called from somewhere:
local_ref = lnk["name"].lower()
if self.is_element_referred(local_ref, file_name):
# We now know something in the document wants this anchor -
# replace it with label
lnk.name = "p"
lnk.string = f"..\\_{local_ref}:"
del lnk["name"]
else:
logging.debug("Dropping unreferred link")
for li in soup.body.find_all("li"):
del li['id']
for pre in soup.body.find_all("pre"):
text = pre.get_text()
# if text.startswith("{"):
# pre["class"] = "data"
if re.search(
r'\[[a-z]*@\w+.*\][\s#>]?',
text
):
# Something like "[root@ecs-test-0001 ~]#"
pre["class"] = "console"
elif re.match(
r'^(GET|PUT|POST|DELETE)',
text
):
# Something like "DELETE https://some_url"
pre["class"] = "text"
# And now specialities
rawize_strings = [
# "\*\*\*\*\*\*",
# r"([\\\/\:\*\?\"\~|<>]{4,})"
# ModelArts UMN contain this "unallowed" sequence
r"(\\/:\*\?\"<>\|)"
]
for to_rawize in rawize_strings:
for p in soup.body.find_all(string=re.compile(to_rawize)):
if p.string:
curr = p.string
part = re.search(to_rawize, curr)
if len(part.groups()) > 0:
new = curr.replace(
part.group(1),
f"<code>{part.group(1)}</code>"
)
p.replace_with(bs4.BeautifulSoup(new, 'html.parser'))
print(part.group(1))
print(f"New content is {p.string}")
else:
print('ups')
logging.error(f"String with star: {p}")
return soup.body
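    # Example of the transformation performed above (sketch only, not literal
    # output): an input fragment such as
    #   <div class="fignone" id="fig1"><span class="figcap">Figure 1</span><img src="icon.png"/></div>
    # is turned into a label paragraph "..\_fig1:" (rewritten to ".. _fig1:" by
    # the rst post-processing in main()) followed by
    #   <figure><figcaption>Figure 1</figcaption><img src="/_static/images/icon.png"/></figure>
    # while "icon.png" is recorded in self.doc_images so it can be copied later.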
def main(self):
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Process links.')
parser.add_argument(
'path', type=str, help='path to the files')
parser.add_argument(
'--improve-table-headers', action='store_true',
help='Improve table headers by enforcing spaces around `/`')
parser.add_argument(
'--pygments-lexer',
help='Set particular code-block lexer language')
parser.add_argument(
'--dest',
help='Directory to write resulting files')
self.args = parser.parse_args()
meta_data = json.loads(open(
pathlib.Path(self.args.path, "CLASS.TXT.json")
).read())
metadata_by_uri = dict()
metadata_by_code = dict()
self.doc_images = set()
if self.args.dest:
dest = pathlib.Path(self.args.dest)
else:
dest = pathlib.Path(self.args.path, 'result')
for f in meta_data:
f['new_name'] = self.get_new_name(f['title'])
metadata_by_uri[f['uri']] = f
metadata_by_code[f.get('code')] = f
tree = self.build_doc_tree(metadata_by_code)
pathlib.Path(self.args.path, "temp/").mkdir(
parents=True, exist_ok=True)
# Scan all docs for anchors
for f in pathlib.Path(self.args.path).glob("*.html"):
if f.name not in metadata_by_uri:
continue
# Registering section links
with open(f, 'r') as reader:
logging.debug(f"Scanning {f.name}")
content = reader.read()
soup = bs4.BeautifulSoup(content, "lxml")
for lnk in soup.body.find_all('a'):
if "name" in lnk.attrs and lnk.string is None:
anchor = lnk.attrs["name"]
title = re.sub('[ _:]', '-', anchor)
res = dict(
fname=f.name,
title=title,
replace=title.lower()
)
self.doc_anchors[anchor] = res
if "href" in lnk.attrs and lnk["href"]:
self.doc_links[lnk["href"].lower()] = f.name
for f in pathlib.Path(self.args.path).glob("*.html"):
if f.name not in metadata_by_uri:
continue
_target = metadata_by_uri[f.name]
target = _target['new_name']
target_path = self.get_target_path(
_target['p_code'], metadata_by_code)
pathlib.Path(self.args.path, "temp").mkdir(
parents=True, exist_ok=True)
pathlib.Path(self.args.path, "tmp_result/" + target_path).mkdir(
parents=True, exist_ok=True)
pathlib.Path(dest, target_path).mkdir(
parents=True, exist_ok=True)
# Pre-processing of html content
with open(f, 'r') as reader, \
open(pathlib.Path(self.args.path,
f"temp/{target}.tmp"), 'w') as writer:
# if f.name not in [
# ]:
# continue
logging.info(f"Pre-Processing {f} as {target}")
content = reader.read()
soup = bs4.BeautifulSoup(content, "lxml")
proc = self.streamline_html(soup, f.name)
for lnk in proc.find_all("a"):
href = lnk.get('href')
if href and not href.startswith('http'):
# Internal link - replace with :ref:
code = soup.new_tag('code')
code['class'] = "interpreted-text"
code['role'] = "ref"
href_parts = href.split('#')
if len(href_parts) > 1:
# for anchor just use anchor ref
link_target = href_parts[1].lower()
else:
# for other page - use only page name
link_target = href_parts[0].replace(
".html", "").lower()
if link_target:
# Looks like an anchor on the same page
code.string = f"{lnk.string} <{link_target}>"
logging.debug(f" replace {lnk} with {code}")
lnk.replace_with(code)
# Drop parent link at the bottom of the page
for parent in proc.find_all("p", class_="parentlink"):
parent.decompose()
logging.info(f'Saving file {writer.name}')
writer.write(str(proc))
# Convert html to rst
os.system(
f"pandoc '{self.args.path}/temp/{target}.tmp' -f html "
f"-o '{self.args.path}/tmp_result/{target_path}/{target}.rst' "
f"--ascii -s --wrap none"
)
# Post processing of rendered rst
with open(f"{self.args.path}/tmp_result/"
f"{target_path}/{target}.rst", 'r') \
as reader, \
open(pathlib.Path(dest, target_path,
f"{target}.rst"), 'w') as writer:
logging.info(f"Post processing {target}")
writer.write(f":original_name: {f.name}\n\n")
# Add root file label
writer.write(f".. _{f.name.replace('.html', '')}:\n\n")
# post process some usual stuff
for line in reader.readlines():
processed_line = re.sub(r'\.\.\\_', '.. _', line)
processed_line = re.sub(r'√', 'Y', processed_line)
processed_line = re.sub(
r'public_sys-resources/', '', processed_line)
processed_line = re.sub(
r' :name: .*$', '', processed_line)
processed_line = re.sub(
r'\*\*Parent topic:.*$', '', processed_line)
processed_line = re.sub(
r'.. code:: screen$',
r'.. code-block::', processed_line)
for lexer in ["json", "bash", "text", "console"]:
processed_line = re.sub(
f".. code:: {lexer}$",
f".. code-block:: {lexer}", processed_line)
if re.match(rf".. code:: {lexer}\s", processed_line):
logging.error(
f"'code-block: {lexer}' with something "
"afterwards")
exit(1)
# spaces are important, since code-block may reside inside
# of the cell
processed_line = re.sub(
r'.. code:: screen\s',
r'.. code-block:: ', processed_line)
processed_line = re.sub(
r'.. code:: codeblock$',
r'.. code-block::', processed_line)
processed_line = re.sub(r'[ \t]*$', '', processed_line)
writer.write(processed_line)
# Generate indexes
for k, v in tree.items():
path = ''
title = 'Main Index'
page_label = ''
if k != 0:
curr = metadata_by_code[k]
title = curr['title']
page_label = curr['uri'].replace(".html", "").lower()
path = self.get_target_path(curr['code'], metadata_by_code)
with open(pathlib.Path(dest, path, "index.rst"), "w") as index:
if page_label:
index.write(f".. _{page_label}:\n\n")
index.write('=' * (len(title)) + '\n')
index.write(title + '\n')
index.write('=' * (len(title)) + '\n')
index.write('\n')
index.write('.. toctree::\n')
index.write(' :maxdepth: 1\n\n')
for child in v:
new_name = child['new_name']
if child['code'] in tree:
# If this is folder - add /index
new_name = new_name + '/index'
index.write(f" {new_name}\n")
p = pathlib.Path(dest, f"{path}.rst")
if p.exists():
logging.warning(
f"{p.resolve()} is removed in favour"
f" of result/{path}/index.rst")
p.unlink()
# Copy used images
if len(self.doc_images) > 0:
logging.debug("Processing images")
img_dest = pathlib.Path(dest, '_static', 'images')
img_dest.mkdir(parents=True, exist_ok=True)
for img in self.doc_images:
shutil.copyfile(
pathlib.Path(self.args.path, img).resolve(strict=False),
pathlib.Path(img_dest, img).resolve(strict=False)
)
def main():
OTCDocConvertor().main()
if __name__ == "__main__":
main()
| 39.795964 | 79 | 0.454843 |
a7fcdc280ff334e840fc839412fd48f96d6ee512 | 24,262 | py | Python | xarray/core/alignment.py | jenssss/xarray | 43a2a4bdf3a492d89aae9f2c5b0867932ff51cef | ["Apache-2.0"] | null | null | null | xarray/core/alignment.py | jenssss/xarray | 43a2a4bdf3a492d89aae9f2c5b0867932ff51cef | ["Apache-2.0"] | null | null | null | xarray/core/alignment.py | jenssss/xarray | 43a2a4bdf3a492d89aae9f2c5b0867932ff51cef | ["Apache-2.0"] | null | null | null |
import functools
import operator
from collections import defaultdict
from contextlib import suppress
from typing import TYPE_CHECKING, Any, Dict, Hashable, Mapping, Optional, Tuple, Union
import numpy as np
import pandas as pd
from . import dtypes, utils
from .indexing import get_indexer_nd
from .utils import is_dict_like, is_full_slice
from .variable import IndexVariable, Variable
if TYPE_CHECKING:
from .dataarray import DataArray
from .dataset import Dataset
def _get_joiner(join):
if join == "outer":
return functools.partial(functools.reduce, operator.or_)
elif join == "inner":
return functools.partial(functools.reduce, operator.and_)
elif join == "left":
return operator.itemgetter(0)
elif join == "right":
return operator.itemgetter(-1)
elif join == "exact":
# We cannot return a function to "align" in this case, because it needs
# access to the dimension name to give a good error message.
return None
elif join == "override":
# We rewrite all indexes and then use join='left'
return operator.itemgetter(0)
else:
raise ValueError("invalid value for join: %s" % join)
def _override_indexes(objects, all_indexes, exclude):
for dim, dim_indexes in all_indexes.items():
if dim not in exclude:
lengths = {index.size for index in dim_indexes}
if len(lengths) != 1:
raise ValueError(
"Indexes along dimension %r don't have the same length."
" Cannot use join='override'." % dim
)
objects = list(objects)
for idx, obj in enumerate(objects[1:]):
new_indexes = {}
for dim in obj.indexes:
if dim not in exclude:
new_indexes[dim] = all_indexes[dim][0]
objects[idx + 1] = obj._overwrite_indexes(new_indexes)
return objects
def align(
*objects,
join="inner",
copy=True,
indexes=None,
exclude=frozenset(),
fill_value=dtypes.NA,
):
"""
Given any number of Dataset and/or DataArray objects, returns new
objects with aligned indexes and dimension sizes.
Array from the aligned objects are suitable as input to mathematical
operators, because along each dimension they have the same index and size.
Missing values (if ``join != 'inner'``) are filled with ``fill_value``.
The default fill value is NaN.
Parameters
----------
*objects : Dataset or DataArray
Objects to align.
join : {"outer", "inner", "left", "right", "exact", "override"}, optional
Method for joining the indexes of the passed objects along each
dimension:
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
copy : bool, optional
If ``copy=True``, data in the return values is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed with
only slice operations, then the output may share memory with the input.
In either case, new xarray objects are always returned.
indexes : dict-like, optional
Any indexes explicitly provided with the `indexes` argument should be
used in preference to the aligned indexes.
exclude : sequence of str, optional
Dimensions that must be excluded from alignment
fill_value : scalar, optional
Value to use for newly missing values
Returns
-------
aligned : DataArray or Dataset
Tuple of objects with the same type as `*objects` with aligned
coordinates.
Raises
------
ValueError
If any dimensions without labels on the arguments have different sizes,
or a different size than the size of the aligned dimension labels.
Examples
--------
>>> import xarray as xr
>>> x = xr.DataArray(
... [[25, 35], [10, 24]],
... dims=("lat", "lon"),
... coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]},
... )
>>> y = xr.DataArray(
... [[20, 5], [7, 13]],
... dims=("lat", "lon"),
... coords={"lat": [35.0, 42.0], "lon": [100.0, 120.0]},
... )
>>> x
<xarray.DataArray (lat: 2, lon: 2)>
array([[25, 35],
[10, 24]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> y
<xarray.DataArray (lat: 2, lon: 2)>
array([[20, 5],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y)
>>> a
<xarray.DataArray (lat: 1, lon: 2)>
array([[25, 35]])
Coordinates:
* lat (lat) float64 35.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 1, lon: 2)>
array([[20, 5]])
Coordinates:
* lat (lat) float64 35.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join="outer")
>>> a
<xarray.DataArray (lat: 3, lon: 2)>
array([[25., 35.],
[10., 24.],
[nan, nan]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 3, lon: 2)>
array([[20., 5.],
[nan, nan],
[ 7., 13.]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join="outer", fill_value=-999)
>>> a
<xarray.DataArray (lat: 3, lon: 2)>
array([[ 25, 35],
[ 10, 24],
[-999, -999]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 3, lon: 2)>
array([[ 20, 5],
[-999, -999],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 40.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join="left")
>>> a
<xarray.DataArray (lat: 2, lon: 2)>
array([[25, 35],
[10, 24]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 2, lon: 2)>
array([[20., 5.],
[nan, nan]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join="right")
>>> a
<xarray.DataArray (lat: 2, lon: 2)>
array([[25., 35.],
[nan, nan]])
Coordinates:
* lat (lat) float64 35.0 42.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 2, lon: 2)>
array([[20, 5],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 42.0
* lon (lon) float64 100.0 120.0
>>> a, b = xr.align(x, y, join="exact")
Traceback (most recent call last):
...
"indexes along dimension {!r} are not equal".format(dim)
ValueError: indexes along dimension 'lat' are not equal
>>> a, b = xr.align(x, y, join="override")
>>> a
<xarray.DataArray (lat: 2, lon: 2)>
array([[25, 35],
[10, 24]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
>>> b
<xarray.DataArray (lat: 2, lon: 2)>
array([[20, 5],
[ 7, 13]])
Coordinates:
* lat (lat) float64 35.0 40.0
* lon (lon) float64 100.0 120.0
"""
if indexes is None:
indexes = {}
if not indexes and len(objects) == 1:
# fast path for the trivial case
(obj,) = objects
return (obj.copy(deep=copy),)
all_indexes = defaultdict(list)
unlabeled_dim_sizes = defaultdict(set)
for obj in objects:
for dim in obj.dims:
if dim not in exclude:
try:
index = obj.indexes[dim]
except KeyError:
unlabeled_dim_sizes[dim].add(obj.sizes[dim])
else:
all_indexes[dim].append(index)
if join == "override":
objects = _override_indexes(objects, all_indexes, exclude)
# We don't reindex over dimensions with all equal indexes for two reasons:
# - It's faster for the usual case (already aligned objects).
# - It ensures it's possible to do operations that don't require alignment
# on indexes with duplicate values (which cannot be reindexed with
# pandas). This is useful, e.g., for overwriting such duplicate indexes.
joiner = _get_joiner(join)
joined_indexes = {}
for dim, matching_indexes in all_indexes.items():
if dim in indexes:
index = utils.safe_cast_to_index(indexes[dim])
if (
any(not index.equals(other) for other in matching_indexes)
or dim in unlabeled_dim_sizes
):
joined_indexes[dim] = index
else:
if (
any(
not matching_indexes[0].equals(other)
for other in matching_indexes[1:]
)
or dim in unlabeled_dim_sizes
):
if join == "exact":
raise ValueError(f"indexes along dimension {dim!r} are not equal")
index = joiner(matching_indexes)
joined_indexes[dim] = index
else:
index = matching_indexes[0]
if dim in unlabeled_dim_sizes:
unlabeled_sizes = unlabeled_dim_sizes[dim]
labeled_size = index.size
if len(unlabeled_sizes | {labeled_size}) > 1:
raise ValueError(
"arguments without labels along dimension %r cannot be "
"aligned because they have different dimension size(s) %r "
"than the size of the aligned dimension labels: %r"
% (dim, unlabeled_sizes, labeled_size)
)
for dim in unlabeled_dim_sizes:
if dim not in all_indexes:
sizes = unlabeled_dim_sizes[dim]
if len(sizes) > 1:
raise ValueError(
"arguments without labels along dimension %r cannot be "
"aligned because they have different dimension sizes: %r"
% (dim, sizes)
)
result = []
for obj in objects:
valid_indexers = {k: v for k, v in joined_indexes.items() if k in obj.dims}
if not valid_indexers:
# fast path for no reindexing necessary
new_obj = obj.copy(deep=copy)
else:
new_obj = obj.reindex(copy=copy, fill_value=fill_value, **valid_indexers)
new_obj.encoding = obj.encoding
result.append(new_obj)
return tuple(result)
def deep_align(
objects,
join="inner",
copy=True,
indexes=None,
exclude=frozenset(),
raise_on_invalid=True,
fill_value=dtypes.NA,
):
"""Align objects for merging, recursing into dictionary values.
This function is not public API.
"""
from .dataarray import DataArray
from .dataset import Dataset
if indexes is None:
indexes = {}
def is_alignable(obj):
return isinstance(obj, (DataArray, Dataset))
positions = []
keys = []
out = []
targets = []
no_key = object()
not_replaced = object()
for position, variables in enumerate(objects):
if is_alignable(variables):
positions.append(position)
keys.append(no_key)
targets.append(variables)
out.append(not_replaced)
elif is_dict_like(variables):
current_out = {}
for k, v in variables.items():
if is_alignable(v) and k not in indexes:
# Skip variables in indexes for alignment, because these
                    # should be overwritten instead:
# https://github.com/pydata/xarray/issues/725
# https://github.com/pydata/xarray/issues/3377
# TODO(shoyer): doing this here feels super-hacky -- can we
# move it explicitly into merge instead?
positions.append(position)
keys.append(k)
targets.append(v)
current_out[k] = not_replaced
else:
current_out[k] = v
out.append(current_out)
elif raise_on_invalid:
raise ValueError(
"object to align is neither an xarray.Dataset, "
"an xarray.DataArray nor a dictionary: {!r}".format(variables)
)
else:
out.append(variables)
aligned = align(
*targets,
join=join,
copy=copy,
indexes=indexes,
exclude=exclude,
fill_value=fill_value,
)
for position, key, aligned_obj in zip(positions, keys, aligned):
if key is no_key:
out[position] = aligned_obj
else:
out[position][key] = aligned_obj
# something went wrong: we should have replaced all sentinel values
for arg in out:
assert arg is not not_replaced
if is_dict_like(arg):
assert all(value is not not_replaced for value in arg.values())
return out
def reindex_like_indexers(
target: "Union[DataArray, Dataset]", other: "Union[DataArray, Dataset]"
) -> Dict[Hashable, pd.Index]:
"""Extract indexers to align target with other.
Not public API.
Parameters
----------
target : Dataset or DataArray
Object to be aligned.
other : Dataset or DataArray
Object to be aligned with.
Returns
-------
Dict[Hashable, pandas.Index] providing indexes for reindex keyword
arguments.
Raises
------
ValueError
If any dimensions without labels have different sizes.
"""
indexers = {k: v for k, v in other.indexes.items() if k in target.dims}
for dim in other.dims:
if dim not in indexers and dim in target.dims:
other_size = other.sizes[dim]
target_size = target.sizes[dim]
if other_size != target_size:
raise ValueError(
"different size for unlabeled "
"dimension on argument %r: %r vs %r"
% (dim, other_size, target_size)
)
return indexers
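# Illustration (hypothetical, not part of the xarray sources): if `other` carries
# an index only on dimension "x" while `target` has dims ("x", "y"), the result
# is {"x": other.indexes["x"]}; the unlabeled dimension "y" contributes no
# indexer, but its sizes in `target` and `other` must match or a ValueError is
# raised above.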
def reindex_variables(
variables: Mapping[Any, Variable],
sizes: Mapping[Any, int],
indexes: Mapping[Any, pd.Index],
indexers: Mapping,
method: Optional[str] = None,
tolerance: Any = None,
copy: bool = True,
fill_value: Optional[Any] = dtypes.NA,
sparse: bool = False,
) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]:
"""Conform a dictionary of aligned variables onto a new set of variables,
filling in missing values with NaN.
Not public API.
Parameters
----------
variables : dict-like
Dictionary of xarray.Variable objects.
sizes : dict-like
Dictionary from dimension names to integer sizes.
indexes : dict-like
Dictionary of indexes associated with variables.
indexers : dict
Dictionary with keys given by dimension names and values given by
arrays of coordinates tick labels. Any mis-matched coordinate values
will be filled in with NaN, and any mis-matched dimension names will
simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact matches.
The values of the index at the matching locations must satisfy the
equation ``abs(index[indexer] - target) <= tolerance``.
copy : bool, optional
If ``copy=True``, data in the return values is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, new xarray objects are always returned.
fill_value : scalar, optional
Value to use for newly missing values
sparse: bool, optional
        Use a sparse array
Returns
-------
reindexed : dict
Dict of reindexed variables.
new_indexes : dict
Dict of indexes associated with the reindexed variables.
"""
from .dataarray import DataArray
# create variables for the new dataset
reindexed: Dict[Hashable, Variable] = {}
# build up indexers for assignment along each dimension
int_indexers = {}
new_indexes = dict(indexes)
masked_dims = set()
unchanged_dims = set()
for dim, indexer in indexers.items():
if isinstance(indexer, DataArray) and indexer.dims != (dim,):
raise ValueError(
"Indexer has dimensions {:s} that are different "
"from that to be indexed along {:s}".format(str(indexer.dims), dim)
)
target = new_indexes[dim] = utils.safe_cast_to_index(indexers[dim])
if dim in indexes:
index = indexes[dim]
if not index.is_unique:
raise ValueError(
"cannot reindex or align along dimension %r because the "
"index has duplicate values" % dim
)
int_indexer = get_indexer_nd(index, target, method, tolerance)
# We uses negative values from get_indexer_nd to signify
# values that are missing in the index.
if (int_indexer < 0).any():
masked_dims.add(dim)
elif np.array_equal(int_indexer, np.arange(len(index))):
unchanged_dims.add(dim)
int_indexers[dim] = int_indexer
if dim in variables:
var = variables[dim]
args: tuple = (var.attrs, var.encoding)
else:
args = ()
reindexed[dim] = IndexVariable((dim,), target, *args)
for dim in sizes:
if dim not in indexes and dim in indexers:
existing_size = sizes[dim]
new_size = indexers[dim].size
if existing_size != new_size:
raise ValueError(
"cannot reindex or align along dimension %r without an "
"index because its size %r is different from the size of "
"the new index %r" % (dim, existing_size, new_size)
)
for name, var in variables.items():
if name not in indexers:
if sparse:
var = var._as_sparse(fill_value=fill_value)
key = tuple(
slice(None) if d in unchanged_dims else int_indexers.get(d, slice(None))
for d in var.dims
)
needs_masking = any(d in masked_dims for d in var.dims)
if needs_masking:
new_var = var._getitem_with_mask(key, fill_value=fill_value)
elif all(is_full_slice(k) for k in key):
# no reindexing necessary
# here we need to manually deal with copying data, since
# we neither created a new ndarray nor used fancy indexing
new_var = var.copy(deep=copy)
else:
new_var = var[key]
reindexed[name] = new_var
return reindexed, new_indexes
def _get_broadcast_dims_map_common_coords(args, exclude):
common_coords = {}
dims_map = {}
for arg in args:
for dim in arg.dims:
if dim not in common_coords and dim not in exclude:
dims_map[dim] = arg.sizes[dim]
if dim in arg.coords:
common_coords[dim] = arg.coords[dim].variable
return dims_map, common_coords
def _broadcast_helper(arg, exclude, dims_map, common_coords):
from .dataarray import DataArray
from .dataset import Dataset
def _set_dims(var):
# Add excluded dims to a copy of dims_map
var_dims_map = dims_map.copy()
for dim in exclude:
with suppress(ValueError):
# ignore dim not in var.dims
var_dims_map[dim] = var.shape[var.dims.index(dim)]
return var.set_dims(var_dims_map)
def _broadcast_array(array):
data = _set_dims(array.variable)
coords = dict(array.coords)
coords.update(common_coords)
return DataArray(data, coords, data.dims, name=array.name, attrs=array.attrs)
def _broadcast_dataset(ds):
data_vars = {k: _set_dims(ds.variables[k]) for k in ds.data_vars}
coords = dict(ds.coords)
coords.update(common_coords)
return Dataset(data_vars, coords, ds.attrs)
if isinstance(arg, DataArray):
return _broadcast_array(arg)
elif isinstance(arg, Dataset):
return _broadcast_dataset(arg)
else:
raise ValueError("all input must be Dataset or DataArray objects")
def broadcast(*args, exclude=None):
"""Explicitly broadcast any number of DataArray or Dataset objects against
one another.
xarray objects automatically broadcast against each other in arithmetic
operations, so this function should not be necessary for normal use.
If no change is needed, the input data is returned to the output without
being copied.
Parameters
----------
*args : DataArray or Dataset
Arrays to broadcast against each other.
exclude : sequence of str, optional
Dimensions that must not be broadcasted
Returns
-------
broadcast : tuple of DataArray or tuple of Dataset
The same data as the input arrays, but with additional dimensions
inserted so that all data arrays have the same dimensions and shape.
Examples
--------
Broadcast two data arrays against one another to fill out their dimensions:
>>> a = xr.DataArray([1, 2, 3], dims="x")
>>> b = xr.DataArray([5, 6], dims="y")
>>> a
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) int64 0 1 2
>>> b
<xarray.DataArray (y: 2)>
array([5, 6])
Coordinates:
* y (y) int64 0 1
>>> a2, b2 = xr.broadcast(a, b)
>>> a2
<xarray.DataArray (x: 3, y: 2)>
array([[1, 1],
[2, 2],
[3, 3]])
Coordinates:
* x (x) int64 0 1 2
* y (y) int64 0 1
>>> b2
<xarray.DataArray (x: 3, y: 2)>
array([[5, 6],
[5, 6],
[5, 6]])
Coordinates:
* y (y) int64 0 1
* x (x) int64 0 1 2
Fill out the dimensions of all data variables in a dataset:
>>> ds = xr.Dataset({"a": a, "b": b})
>>> (ds2,) = xr.broadcast(ds) # use tuple unpacking to extract one dataset
>>> ds2
<xarray.Dataset>
Dimensions: (x: 3, y: 2)
Coordinates:
* x (x) int64 0 1 2
* y (y) int64 0 1
Data variables:
a (x, y) int64 1 1 2 2 3 3
b (x, y) int64 5 6 5 6 5 6
"""
if exclude is None:
exclude = set()
args = align(*args, join="outer", copy=False, exclude=exclude)
dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)
result = []
for arg in args:
result.append(_broadcast_helper(arg, exclude, dims_map, common_coords))
return tuple(result)
| 32.830853 | 88 | 0.577116 |
6075eb83fd46e75d809f2af739f106b72336d96a | 221 | py | Python | topCoder/srms/600s/srm606/div2/ellys_substring_sorter.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | ["WTFPL"] | 1 | 2020-09-30T19:53:08.000Z | 2020-09-30T19:53:08.000Z | topCoder/srms/600s/srm606/div2/ellys_substring_sorter.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | ["WTFPL"] | null | null | null | topCoder/srms/600s/srm606/div2/ellys_substring_sorter.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | ["WTFPL"] | 1 | 2020-10-15T09:10:57.000Z | 2020-10-15T09:10:57.000Z |
class EllysSubstringSorter:
def getMin(self, S, L):
m = 'Z' * len(S)
        for i in range(len(S)-L+1):
s = S[:i] + ''.join(sorted(S[i:i+L])) + S[i+L:]
m = min(m, s)
return m
| 27.625 | 59 | 0.438914 |
54dc6a720ce8510e8c29c93c73551cb92c204b64 | 784 | py | Python | quiz_brain.py | jonidakolgjini/quiz_game | eae9a197f6831f0413b3f9e0355ca0ce4b7d95a2 | ["MIT"] | null | null | null | quiz_brain.py | jonidakolgjini/quiz_game | eae9a197f6831f0413b3f9e0355ca0ce4b7d95a2 | ["MIT"] | null | null | null | quiz_brain.py | jonidakolgjini/quiz_game | eae9a197f6831f0413b3f9e0355ca0ce4b7d95a2 | ["MIT"] | null | null | null |
import html
class QuizBrain:
def __init__(self, q_list):
self.question_number = 0
self.score = 0
self.question_list = q_list
self.current_question = None
def still_has_questions(self):
return self.question_number < len(self.question_list)
def next_question(self):
self.current_question = self.question_list[self.question_number]
self.question_number += 1
q_text = html.unescape(self.current_question.text)
return f"Q.{self.question_number}: {q_text}"
def check_answer(self, user_answer):
correct_answer = self.current_question.answer
if user_answer.lower() == correct_answer.lower():
self.score += 1
return True
else:
return False
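# Usage sketch (assumes hypothetical Question objects exposing the .text and
# .answer attributes this class reads):
#   quiz = QuizBrain([Question("2 + 2 = 4?", "True")])
#   while quiz.still_has_questions():
#       answer = input(quiz.next_question() + " (True/False): ")
#       quiz.check_answer(answer)
#   print(f"Final score: {quiz.score}/{quiz.question_number}")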
| 29.037037 | 72 | 0.645408 |
4215db8b0fd4cb2956409f375e222c2861275f0f | 6,720 | py | Python | dc_console_sdk/model/health_assessment/health_assessment_rule_version_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | ["Apache-2.0"] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | dc_console_sdk/model/health_assessment/health_assessment_rule_version_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | ["Apache-2.0"] | null | null | null | dc_console_sdk/model/health_assessment/health_assessment_rule_version_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: health_assessment_rule_version.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from dc_console_sdk.model.health_assessment import health_assessment_event_score_config_item_pb2 as dc__console__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2
from dc_console_sdk.model.health_assessment import health_assessment_related_resource_score_config_item_pb2 as dc__console__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__score__config__item__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='health_assessment_rule_version.proto',
package='health_assessment',
syntax='proto3',
serialized_options=_b('ZKgo.easyops.local/contracts/protorepo-models/easyops/model/health_assessment'),
serialized_pb=_b('\n$health_assessment_rule_version.proto\x12\x11health_assessment\x1aVdc_console_sdk/model/health_assessment/health_assessment_event_score_config_item.proto\x1a\x61\x64\x63_console_sdk/model/health_assessment/health_assessment_related_resource_score_config_item.proto\"\xc6\x02\n\x1bHealthAssessmentRuleVersion\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06ruleId\x18\x02 \x01(\t\x12\x10\n\x08objectId\x18\x03 \x01(\t\x12Q\n\x10\x65ventScoreConfig\x18\x04 \x03(\x0b\x32\x37.health_assessment.HealthAssessmentEventScoreConfigItem\x12\x65\n\x1arelatedResourceScoreConfig\x18\x05 \x03(\x0b\x32\x41.health_assessment.HealthAssessmentRelatedResourceScoreConfigItem\x12\x18\n\x10\x65ventScoreWeight\x18\x06 \x01(\x05\x12\x1d\n\x15relatedResourceWeight\x18\x07 \x01(\x05\x42MZKgo.easyops.local/contracts/protorepo-models/easyops/model/health_assessmentb\x06proto3')
,
dependencies=[dc__console__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2.DESCRIPTOR,dc__console__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__score__config__item__pb2.DESCRIPTOR,])
_HEALTHASSESSMENTRULEVERSION = _descriptor.Descriptor(
name='HealthAssessmentRuleVersion',
full_name='health_assessment.HealthAssessmentRuleVersion',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='health_assessment.HealthAssessmentRuleVersion.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ruleId', full_name='health_assessment.HealthAssessmentRuleVersion.ruleId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectId', full_name='health_assessment.HealthAssessmentRuleVersion.objectId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eventScoreConfig', full_name='health_assessment.HealthAssessmentRuleVersion.eventScoreConfig', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relatedResourceScoreConfig', full_name='health_assessment.HealthAssessmentRuleVersion.relatedResourceScoreConfig', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eventScoreWeight', full_name='health_assessment.HealthAssessmentRuleVersion.eventScoreWeight', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relatedResourceWeight', full_name='health_assessment.HealthAssessmentRuleVersion.relatedResourceWeight', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=247,
serialized_end=573,
)
_HEALTHASSESSMENTRULEVERSION.fields_by_name['eventScoreConfig'].message_type = dc__console__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2._HEALTHASSESSMENTEVENTSCORECONFIGITEM
_HEALTHASSESSMENTRULEVERSION.fields_by_name['relatedResourceScoreConfig'].message_type = dc__console__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__score__config__item__pb2._HEALTHASSESSMENTRELATEDRESOURCESCORECONFIGITEM
DESCRIPTOR.message_types_by_name['HealthAssessmentRuleVersion'] = _HEALTHASSESSMENTRULEVERSION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HealthAssessmentRuleVersion = _reflection.GeneratedProtocolMessageType('HealthAssessmentRuleVersion', (_message.Message,), {
'DESCRIPTOR' : _HEALTHASSESSMENTRULEVERSION,
'__module__' : 'health_assessment_rule_version_pb2'
# @@protoc_insertion_point(class_scope:health_assessment.HealthAssessmentRuleVersion)
})
_sym_db.RegisterMessage(HealthAssessmentRuleVersion)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 56.470588 | 882 | 0.813393 |
912b7e2826ceb060e1eb0ca6ff2f083b0dd2c6d5 | 4,709 | py | Python | lambda/tests/integration/test_formSubmit_email.py | epicfaace/GCMW | b06b41c1629866e334c8b2cd94d008bbc2028cdf | ["Apache-2.0"] | 2 | 2021-08-09T07:50:37.000Z | 2022-03-15T17:23:37.000Z | lambda/tests/integration/test_formSubmit_email.py | ksankara/CFF | 70f10bb7abc0f381fa309379a85d2782ed6c36b8 | ["Apache-2.0"] | 114 | 2019-06-20T15:27:13.000Z | 2020-08-30T21:15:56.000Z | lambda/tests/integration/test_formSubmit_email.py | epicfaace/GCMW | b06b41c1629866e334c8b2cd94d008bbc2028cdf | ["Apache-2.0"] | 7 | 2019-07-20T06:11:19.000Z | 2020-06-28T15:23:14.000Z |
"""
npm test tests.integration.test_formSubmit_email
"""
import copy
import unittest
from chalice.config import Config
import json
from app import app
from tests.integration.baseTestCase import BaseTestCase
from tests.integration.constants import _
from chalicelib.models import Form, User, Response, CCAvenueConfig, serialize_model
from bson.objectid import ObjectId
import time
from unittest import mock
import uuid
class FormSubmitEmail(BaseTestCase):
def test_submit_notpaid_no_email(self):
formId = self.create_form()
self.edit_form(
formId,
{
"schema": {"a": "B"},
"uiSchema": {"a": "B"},
"formOptions": {
"paymentInfo": {
"currency": "USD",
"items": [
{
"name": "a",
"description": "a",
"amount": "1",
"quantity": "1",
}
],
},
"confirmationEmailInfo": {"toField": "email"},
},
},
)
responseId, submit_res = self.submit_form(
formId, {"email": "success@simulator.amazonses.com"}
)
self.assertEqual(submit_res["paid"], False)
self.assertEqual(submit_res["email_sent"], False)
_, submit_res = self.submit_form(
formId, {"email": "success@simulator.amazonses.com"}, responseId=responseId
)
self.assertEqual(submit_res["paid"], False)
self.assertEqual(submit_res["email_sent"], False)
def test_submit_paid_send_email(self):
formId = self.create_form()
self.edit_form(
formId,
{
"schema": {"a": "B"},
"uiSchema": {"a": "B"},
"formOptions": {
"paymentInfo": {
"currency": "USD",
"items": [
{
"name": "a",
"description": "a",
"amount": "0",
"quantity": "0",
}
],
},
"confirmationEmailInfo": {"toField": "email"},
},
},
)
responseId, submit_res = self.submit_form(
formId, {"email": "success@simulator.amazonses.com"}
)
self.assertEqual(submit_res["paid"], True)
self.assertEqual(submit_res["email_sent"], True)
_, submit_res = self.submit_form(
formId, {"email": "success@simulator.amazonses.com"}, responseId=responseId
)
self.assertEqual(submit_res["paid"], True)
self.assertEqual(submit_res["email_sent"], True)
_, submit_res = self.submit_form(
formId,
{"email": "success@simulator.amazonses.com"},
responseId=responseId,
submitOptions={"sendEmail": True},
)
self.assertEqual(submit_res["paid"], True)
self.assertEqual(submit_res["email_sent"], True)
def test_submit_paid_disable_send_email(self):
formId = self.create_form()
self.edit_form(
formId,
{
"schema": {"a": "B"},
"uiSchema": {"a": "B"},
"formOptions": {
"paymentInfo": {
"currency": "USD",
"items": [
{
"name": "a",
"description": "a",
"amount": "0",
"quantity": "0",
}
],
},
"confirmationEmailInfo": {"toField": "email"},
},
},
)
responseId, submit_res = self.submit_form(
formId,
{"email": "success@simulator.amazonses.com"},
submitOptions={"sendEmail": False},
)
self.assertEqual(submit_res["paid"], True)
self.assertEqual(submit_res["email_sent"], False)
_, submit_res = self.submit_form(
formId,
{"email": "success@simulator.amazonses.com"},
responseId=responseId,
submitOptions={"sendEmail": False},
)
self.assertEqual(submit_res["paid"], True)
self.assertEqual(submit_res["email_sent"], False)
| 34.372263 | 87 | 0.452962 |
b03aa145f32ce7d3ccad2582b8cc349d75ada3e8 | 897 | py | Python | src/generate_blacklist.py | user8446/BarbBlock | 06fe3571717f678b306c7a41282b21ee4ccd9921 | ["MIT"] | 685 | 2017-08-12T13:47:13.000Z | 2022-03-02T22:32:23.000Z | src/generate_blacklist.py | user8446/BarbBlock | 06fe3571717f678b306c7a41282b21ee4ccd9921 | ["MIT"] | 35 | 2017-08-12T12:42:20.000Z | 2021-08-16T19:32:16.000Z | src/generate_blacklist.py | user8446/BarbBlock | 06fe3571717f678b306c7a41282b21ee4ccd9921 | ["MIT"] | 36 | 2017-08-12T14:57:13.000Z | 2022-01-12T04:32:58.000Z |
import argparse
import os
import yaml
import jinja2
def run_template_engine(blacklist_file, template_file, output_file):
template_path, template_file = os.path.split(template_file)
with open(blacklist_file) as bf:
context = yaml.load(bf)
context['domains'] = [d for blockset in context['blacklist']
for d in blockset['domains']]
result = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_path or './')
).get_template(template_file).render(context)
with open(output_file, 'w') as of:
of.write(result)
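# Sketch of the blacklist YAML shape this function expects, inferred from the
# keys read above (the real file's full schema is an assumption):
#   blacklist:
#     - name: example-block
#       domains:
#         - tracker.example.com
#         - ads.example.net
# The template receives both the raw `blacklist` structure and the flattened
# `domains` list, so a Jinja2 template line like
#   {% for domain in domains %}0.0.0.0 {{ domain }}
#   {% endfor %}
# would emit one hosts-file entry per blocked domain.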
def main():
parser = argparse.ArgumentParser()
parser.add_argument('blacklist')
parser.add_argument('template')
parser.add_argument('outfile')
args = parser.parse_args()
run_template_engine(args.blacklist, args.template, args.outfile)
if __name__ == '__main__':
main()
| 24.243243 | 68 | 0.683389 |
d8f73ff2c9f288851d97f75ec1e080f217012d7f | 1,649 | py | Python | Data/split_sdf.py | ArqiSoft/ml-services | 0c9beacc4a98c3f55ed56969a8b7eb84c4209c21 | ["MIT"] | null | null | null | Data/split_sdf.py | ArqiSoft/ml-services | 0c9beacc4a98c3f55ed56969a8b7eb84c4209c21 | ["MIT"] | null | null | null | Data/split_sdf.py | ArqiSoft/ml-services | 0c9beacc4a98c3f55ed56969a8b7eb84c4209c21 | ["MIT"] | 2 | 2018-12-22T13:46:31.000Z | 2019-06-18T16:46:08.000Z |
from sklearn import model_selection
def extract_sample_mols(input_file, n = 1000, valuename = ''):
counter = 1
values_list = []
mol_numbers = []
with open(input_file, "r") as infile:
for line in infile:
if valuename in line:
values_list.append(next(infile, '').strip())
if line[:4] == '$$$$':
mol_numbers.append(counter)
counter +=1
print(len(mol_numbers), len(values_list))
x_train, x_test, y_train, y_test = model_selection.train_test_split(
mol_numbers,
values_list,
test_size=n,
random_state=42,
shuffle=True)
valid_list = x_test
return valid_list
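# Note on the SD-file layout this parser relies on (standard SDF conventions):
# each molecule record ends with a "$$$$" delimiter line, and a property such as
# logP is stored as a "> <Kow>"-style tag line immediately followed by its value
# line, which is why the code calls next(infile) after matching the value name.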
def write_sample_sdf(input_file, valid_list):
sample_file=open(str(input_file[:-4]) + '_sample.sdf', 'w')
mol = []
i=0
for line in open(input_file):
mol.append(line)
if line[:4] == '$$$$':
i+=1
if i in valid_list:
for mol_line in mol:
sample_file.write(mol_line)
valid_list.remove(i)
mol = []
else:
mol = []
sample_file.close()
return
def extract_sample_dataset(input_file, n, valuename):
valuename = '<' + valuename + '>'
valid_list = extract_sample_mols(input_file=input_file, n = n, valuename = valuename)
write_sample_sdf(input_file=input_file, valid_list=valid_list)
return
input_file = '../test_in/OPERA/QSAR_ready_Curated_3_4STAR_LogP_train.sdf'
extract_sample_dataset(input_file=input_file, n=1000, valuename='Kow') | 26.174603 | 89 | 0.585203 |
8849fed799f3a275adaff61f983f45cc05260c8f | 18,357 | py | Python | python/pynessie/cli.py | thadguidry/nessie | 9ce6d6e610bff29a2cfb27d577063c9ca9e84a7a | ["Apache-2.0"] | null | null | null | python/pynessie/cli.py | thadguidry/nessie | 9ce6d6e610bff29a2cfb27d577063c9ca9e84a7a | ["Apache-2.0"] | null | null | null | python/pynessie/cli.py | thadguidry/nessie | 9ce6d6e610bff29a2cfb27d577063c9ca9e84a7a | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""Console script for nessie_client."""
import datetime
import sys
from collections import defaultdict
from typing import Any
from typing import Dict
from typing import Generator
from typing import List
from typing import Tuple
import attr
import click
import confuse
from click import Option
from click import UsageError
from dateutil.tz import tzlocal
from . import __version__
from ._log import show_log
from ._ref import handle_branch_tag
from .conf import build_config
from .conf import process
from .error import error_handler
from .error import NessieNotFoundException
from .model import CommitMeta
from .model import CommitMetaSchema
from .model import Contents
from .model import ContentsSchema
from .model import Delete
from .model import Entries
from .model import Entry
from .model import EntrySchema
from .model import MultiContents
from .model import Put
from .nessie_client import _contents_key
from .nessie_client import _format_key
from .nessie_client import NessieClient
@attr.s
class ContextObject(object):
"""Click context object."""
nessie = attr.ib(NessieClient)
verbose = attr.ib(bool)
json = attr.ib(bool)
pass_client = click.make_pass_decorator(ContextObject)
def _print_version(ctx: Any, param: Any, value: Any) -> None:
if not value or ctx.resilient_parsing:
return
click.echo("nessie version " + __version__)
ctx.exit()
class MutuallyExclusiveOption(Option):
"""Only allow one option in a list to be set at once."""
def __init__(self: "MutuallyExclusiveOption", *args: List, **kwargs: Dict) -> None:
"""Instantiated a mutually exclusive option."""
self.mutually_exclusive = set(kwargs.pop("mutually_exclusive", []))
super(MutuallyExclusiveOption, self).__init__(*args, **kwargs)
def handle_parse_result(self: "MutuallyExclusiveOption", ctx: click.Context, opts: Dict, args: List) -> Tuple:
"""Ensure mutually exclusive options are not used together."""
if self.mutually_exclusive.intersection(opts) and self.name in opts:
raise UsageError(
"Illegal usage: `{}` is mutually exclusive with " "arguments `{}`.".format(self.name, ", ".join(self.mutually_exclusive))
)
return super(MutuallyExclusiveOption, self).handle_parse_result(ctx, opts, args)
class DefaultHelp(click.Command):
"""If no options are presented show help."""
def __init__(self: "DefaultHelp", *args: List, **kwargs: Dict) -> None:
"""Ensure that help is shown if nothing else is selected."""
context_settings = kwargs.setdefault("context_settings", {})
if "help_option_names" not in context_settings:
context_settings["help_option_names"] = ["-h", "--help"]
self.help_flag = context_settings["help_option_names"][0]
super(DefaultHelp, self).__init__(*args, **kwargs)
def parse_args(self: "DefaultHelp", ctx: click.Context, args: List) -> List:
"""Ensure that help is shown if nothing else is selected."""
if not args:
args = [self.help_flag]
return super(DefaultHelp, self).parse_args(ctx, args)
@click.group()
@click.option("--json", is_flag=True, help="write output in json format.")
@click.option("-v", "--verbose", is_flag=True, help="Verbose output.")
@click.option("--endpoint", help="Optional endpoint, if different from config file.")
@click.option("--version", is_flag=True, callback=_print_version, expose_value=False, is_eager=True)
@click.pass_context
def cli(ctx: click.core.Context, json: bool, verbose: bool, endpoint: str) -> None:
"""Nessie cli tool.
Interact with Nessie branches and tables via the command line
"""
try:
config = build_config({"endpoint": endpoint} if endpoint else None)
nessie = NessieClient(config)
ctx.obj = ContextObject(nessie, verbose, json)
except confuse.exceptions.ConfigTypeError as e:
raise click.ClickException(str(e))
@cli.group()
@pass_client
def remote(ctx: ContextObject) -> None:
"""Set and view remote endpoint."""
pass
@cli.command("config", cls=DefaultHelp)
@click.option("--get", cls=MutuallyExclusiveOption, help="get config parameter", mutually_exclusive=["set", "list", "unset"])
@click.option("--add", cls=MutuallyExclusiveOption, help="set config parameter", mutually_exclusive=["get", "list", "unset"])
@click.option(
"-l",
"--list",
cls=MutuallyExclusiveOption,
is_flag=True,
help="list config parameters",
mutually_exclusive=["set", "get", "unset"],
)
@click.option("--unset", cls=MutuallyExclusiveOption, help="unset config parameter", mutually_exclusive=["get", "list", "set"])
@click.option("--type", help="type to interpret config value to set or get. Allowed options: bool, int")
@click.argument("key", nargs=1, required=False)
@pass_client
@error_handler
def config(ctx: ContextObject, get: str, add: str, list: bool, unset: str, type: str, key: str) -> None:
"""Set and view config."""
res = process(get, add, list, unset, key, type)
click.echo(res)
@remote.command("show")
@pass_client
@error_handler
def show(ctx: ContextObject) -> None:
"""Show current remote."""
click.echo("Remote URL: " + ctx.nessie._base_url)
click.echo("Default branch: " + ctx.nessie.get_reference(None).name)
click.echo("Remote branches: ")
for i in ctx.nessie.list_references():
click.echo("\t" + i.name)
@remote.command(name="add")
@click.argument("endpoint", nargs=1, required=True)
@pass_client
@error_handler
def set_(ctx: ContextObject, endpoint: str) -> None:
"""Set current remote."""
click.echo(process(None, "endpoint", False, None, endpoint))
@remote.command("set-head")
@click.argument("head", nargs=1, required=True)
@click.option("-d", "--delete", is_flag=True, help="delete the default branch")
@pass_client
@error_handler
def set_head(ctx: ContextObject, head: str, delete: bool) -> None:
"""Set current default branch. If -d is passed it will remove the default branch."""
if delete:
click.echo(process(None, None, False, "default_branch", None))
else:
click.echo(process(None, "default_branch", False, None, head))
@cli.command("log")
@click.option("-n", "--number", help="number of log entries to return", type=int, default=-1)
@click.option("--since", "--after", help="Commits more recent than specific date")
@click.option("--until", "--before", help="Commits older than specific date")
@click.option("--author", "--committer", is_flag=True, help="limit commits to specific committer")
@click.argument("revision_range", nargs=1, required=False)
@click.argument("paths", nargs=-1, type=click.Path(exists=False), required=False)
@pass_client
@error_handler
def log(ctx: ContextObject, number: int, since: str, until: str, author: str, revision_range: str, paths: Tuple[click.Path]) -> None:
"""Show commit log.
REVISION_RANGE optional branch, tag or hash to start viewing log from. If of the form <hash>..<hash> only show log
for given range
PATHS optional list of paths. If given, only show commits which affected the given paths
"""
if not revision_range:
start = ctx.nessie.get_default_branch()
end = None
else:
if ".." in revision_range:
start, end = revision_range.split("..")
else:
start = revision_range
end = None
log_result = show_log(ctx.nessie, start, number, since, until, author, end, paths)
if ctx.json:
click.echo(CommitMetaSchema().dumps(log_result, many=True))
else:
click.echo_via_pager(_format_log_result(x) for x in log_result)
def _format_log_result(x: CommitMeta) -> str:
result = click.style("commit {}\n".format(x.hash_), fg="yellow")
result += click.style("Author: {} <{}>\n".format(x.commiter, x.email))
result += click.style("Date: {}\n".format(_format_time(x.commitTime)))
result += click.style("\n\t{}\n\n".format(x.message))
return result
def _format_time(epoch: int) -> str:
dt = datetime.datetime.fromtimestamp(epoch / 1000, datetime.timezone.utc)
return dt.astimezone(tzlocal()).strftime("%c %z")
@cli.command(name="branch")
@click.option("-l", "--list", cls=MutuallyExclusiveOption, is_flag=True, help="list branches", mutually_exclusive=["delete"])
@click.option("-d", "--delete", cls=MutuallyExclusiveOption, is_flag=True, help="delete a branch", mutually_exclusive=["list"])
@click.option("-f", "--force", is_flag=True, help="force branch assignment")
@click.option("-c", "--condition", help="Conditional Hash. Only perform the action if branch currently points to condition.")
@click.argument("branch", nargs=1, required=False)
@click.argument("new_branch", nargs=1, required=False)
@pass_client
@error_handler
def branch_(
ctx: ContextObject,
list: bool,
force: bool,
delete: bool,
branch: str,
new_branch: str,
condition: str,
) -> None:
"""Branch operations.
BRANCH name of branch to list or create/assign
NEW_BRANCH name of branch to assign from or rename to
Examples:
nessie branch -l -> list all branches
nessie branch -l main -> list only main
nessie branch -d main -> delete main
nessie branch -> list all branches
nessie branch main -> create branch main at current head
nessie branch main test -> create branch main at head of test
nessie branch -f main test -> assign main to head of test
"""
results = handle_branch_tag(ctx.nessie, list, delete, branch, new_branch, True, ctx.json, force, ctx.verbose, condition)
if ctx.json:
click.echo(results)
elif results:
click.echo(results)
else:
click.echo()
@cli.command("tag")
@click.option("-l", "--list", cls=MutuallyExclusiveOption, is_flag=True, help="list branches", mutually_exclusive=["delete"])
@click.option("-d", "--delete", cls=MutuallyExclusiveOption, is_flag=True, help="delete a branches", mutually_exclusive=["list"])
@click.option("-f", "--force", is_flag=True, help="force branch assignment")
@click.option("-c", "--condition", help="Conditional Hash. Only perform the action if branch currently points to condition.")
@click.argument("tag_name", nargs=1, required=False)
@click.argument("new_tag", nargs=1, required=False)
@pass_client
@error_handler
def tag(ctx: ContextObject, list: bool, force: bool, delete: bool, tag_name: str, new_tag: str, condition: str) -> None:
"""Tag operations.
    TAG_NAME name of tag to list or create/assign
    NEW_TAG name of tag to assign from or rename to
Examples:
nessie tag -l -> list all tags
nessie tag -l main -> list only main
nessie tag -d main -> delete main
nessie tag -> list all tags
        nessie tag main -> create tag main at current head
        nessie tag main test -> create tag main at head of test
        nessie tag -f main test -> assign main to head of test
"""
results = handle_branch_tag(ctx.nessie, list, delete, tag_name, new_tag, False, ctx.json, force, ctx.verbose, condition)
if ctx.json:
click.echo(results)
elif results:
click.echo(results)
else:
click.echo()
@cli.command("merge")
@click.option("-b", "--branch", help="branch to merge onto. If not supplied the default branch from config is used")
@click.argument("merge_branch", nargs=1, required=False)
@click.option(
"-f",
"--force",
cls=MutuallyExclusiveOption,
is_flag=True,
mutually_exclusive=["condition"],
help="force branch assignment",
)
@click.option(
"-c",
"--condition",
cls=MutuallyExclusiveOption,
mutually_exclusive=["force"],
help="Conditional Hash. Only perform the action if branch currently points to condition.",
)
@pass_client
@error_handler
def merge(ctx: ContextObject, branch: str, force: bool, condition: str, merge_branch: str) -> None:
"""Merge BRANCH into current branch. BRANCH can be a hash or branch."""
if not force and not condition:
raise UsageError(
"""Either condition or force must be set. Condition should be set to a valid hash for concurrency
control or force to ignore current state of Nessie Store."""
)
ctx.nessie.merge(branch if branch else ctx.nessie.get_default_branch(), merge_branch, condition)
click.echo()
@cli.command("cherry-pick")
@click.option("-b", "--branch", help="branch to cherry-pick onto. If not supplied the default branch from config is used")
@click.option(
"-f",
"--force",
cls=MutuallyExclusiveOption,
is_flag=True,
mutually_exclusive=["condition"],
help="force branch assignment",
)
@click.option(
"-c",
"--condition",
cls=MutuallyExclusiveOption,
mutually_exclusive=["force"],
help="Conditional Hash. Only perform the action if branch currently points to condition.",
)
@click.argument("hashes", nargs=-1, required=False)
@pass_client
@error_handler
def cherry_pick(ctx: ContextObject, branch: str, force: bool, condition: str, hashes: Tuple[str]) -> None:
"""Transplant HASHES onto current branch."""
if not force and not condition:
raise UsageError(
"""Either condition or force must be set. Condition should be set to a valid hash for concurrency
control or force to ignore current state of Nessie Store."""
)
ctx.nessie.cherry_pick(branch if branch else ctx.nessie.get_default_branch(), condition, *hashes)
click.echo()
@cli.command("contents")
@click.option(
"-l",
"--list",
cls=MutuallyExclusiveOption,
is_flag=True,
help="list tables",
mutually_exclusive=["delete", "set"],
)
@click.option(
"-d",
"--delete",
cls=MutuallyExclusiveOption,
is_flag=True,
help="delete a table",
mutually_exclusive=["list", "set"],
)
@click.option(
"-s",
"--set",
cls=MutuallyExclusiveOption,
is_flag=True,
help="modify a table",
mutually_exclusive=["list", "delete"],
)
@click.option(
"-c",
"--condition",
help="Conditional Hash. Only perform the action if branch currently points to condition.",
)
@click.option("-r", "--ref", help="branch to list from. If not supplied the default branch from config is used")
@click.option("-m", "--message", help="commit message")
@click.argument("key", nargs=-1, required=False)
@pass_client
@error_handler
def contents(ctx: ContextObject, list: bool, delete: bool, set: bool, key: List[str], ref: str, message: str, condition: str) -> None:
"""Contents operations.
    KEY name of object to view or delete. If listing, the key limits what is included by namespace.
"""
if list:
keys = ctx.nessie.list_keys(ref if ref else ctx.nessie.get_default_branch())
results = EntrySchema().dumps(_format_keys_json(keys, *key), many=True) if ctx.json else _format_keys(keys, *key)
elif delete:
ctx.nessie.commit(ref, _get_contents(ctx.nessie, ref, delete, *key), old_hash=condition, reason=_get_message(message))
results = ""
elif set:
ctx.nessie.commit(ref, _get_contents(ctx.nessie, ref, delete, *key), old_hash=condition, reason=_get_message(message))
results = ""
else:
def content(*x: str) -> Generator[Contents, Any, None]:
return ctx.nessie.get_values(ref if ref else ctx.nessie.get_default_branch(), *x)
results = ContentsSchema().dumps(content(*key), many=True) if ctx.json else (i.pretty_print() for i in content(*key))
if ctx.json or not results:
click.echo(results)
else:
click.echo(results)
def _get_contents(nessie: NessieClient, ref: str, delete: bool = False, *keys: str) -> MultiContents:
contents_altered = list()
for raw_key in keys:
key = _format_key(raw_key)
try:
content = nessie.get_values(ref, key)
content_json = ContentsSchema().dumps(content.__next__())
except NessieNotFoundException:
content_json = click.get_text_stream("stdin").read()
MARKER = "# Everything below is ignored\n"
if delete:
message = "\n\n" + MARKER
else:
edit_message = (
content_json
+ "\n\n"
+ MARKER
+ "Edit the content above to commit changes."
+ " Closing without change will result in a no-op."
+ " Removing the content results in a delete"
)
try:
message = click.edit(edit_message)
except click.ClickException:
message = edit_message
if message is not None:
message_altered = message.split(MARKER, 1)[0].strip("\n")
if message_altered:
contents_altered.append(Put(_contents_key(raw_key), ContentsSchema().loads(message_altered)))
else:
contents_altered.append(Delete(_contents_key(raw_key)))
return MultiContents(contents_altered)
def _get_commit_message(*keys: str) -> str:
MARKER = "# Everything below is ignored\n"
message = click.edit("\n\n" + MARKER + "\n".join(keys))
if message is not None:
return message.split(MARKER, 1)[0].rstrip("\n")
def _get_message(message: str, *keys: str) -> str:
if message:
return message
return _get_commit_message(*keys)
def _format_keys_json(keys: Entries, *expected_keys: str) -> List[Entry]:
results = list()
for k in keys.entries:
value = ['"{}"'.format(i) if "." in i else i for i in k.name.elements]
if expected_keys and all(key for key in expected_keys if key not in value):
continue
results.append(k)
return results
def _format_keys(keys: Entries, *key: str) -> str:
results = defaultdict(list)
result_str = ""
for k in keys.entries:
results[k.kind].append(k.name)
for k in results.keys():
result_str += k + ":\n"
for v in results[k]:
value = ['"{}"'.format(i) if "." in i else i for i in v.elements]
if key and all(k for k in key if k not in value):
continue
result_str += "\t{}\n".format(".".join(value))
return result_str
if __name__ == "__main__":
sys.exit(cli()) # pragma: no cover
| 35.575581 | 137 | 0.669499 |
ded2ff7043f176a7f2b7dced086c33b7396bbd93 | 1,551 | py | Python | test/filter/test_contentfilter.py | thatch/BitSwanPump | 98a5b8d09f9b59d5361611cee0bd45e7b4c69e3f | [
"BSD-3-Clause"
] | 17 | 2019-02-14T09:26:03.000Z | 2022-03-11T09:23:52.000Z | test/filter/test_contentfilter.py | thatch/BitSwanPump | 98a5b8d09f9b59d5361611cee0bd45e7b4c69e3f | [
"BSD-3-Clause"
] | 91 | 2019-05-06T18:59:02.000Z | 2022-01-11T06:22:32.000Z | test/filter/test_contentfilter.py | thatch/BitSwanPump | 98a5b8d09f9b59d5361611cee0bd45e7b4c69e3f | [
"BSD-3-Clause"
] | 10 | 2019-04-23T08:48:58.000Z | 2022-02-13T14:24:28.000Z | import mongoquery
import bspump.filter
import bspump.unittest
class ContentFilterOnHitPassOnly(bspump.filter.ContentFilter):
def on_hit(self, context, event):
return event
def on_miss(self, context, event):
return None
class TestContentFilter(bspump.unittest.ProcessorTestCase):
def test_content_filter(self):
events = [
(None, {"foo": "bar"}),
(None, {"fizz": "buzz"}),
]
self.set_up_processor(bspump.filter.ContentFilter)
output = self.execute(
events
)
self.assertEqual(
[event for context, event in output],
[{"foo": "bar"}, {"fizz": "buzz"}]
)
def test_content_filter_with_query(self):
events = [
(None, {"foo": "bar"}),
(None, {"fizz": "buzz"}),
]
self.set_up_processor(bspump.filter.ContentFilter, query={"foo": "bar"})
output = self.execute(
events
)
self.assertEqual(
[event for context, event in output],
[{"foo": "bar"}, {"fizz": "buzz"}]
)
def test_custom_content_filter_with_query(self):
events = [
(None, {"foo": "bar"}),
(None, {"fizz": "buzz"}),
]
self.set_up_processor(ContentFilterOnHitPassOnly, query={"foo": "bar"})
output = self.execute(
events
)
self.assertEqual(
[event for context, event in output],
[{"foo": "bar"}]
)
def test_content_filter_with_invalid_query(self):
with self.assertRaises(mongoquery.QueryError):
self.set_up_processor(bspump.filter.ContentFilter, query={"$foo": 2})
with self.assertRaises(TypeError):
self.set_up_processor(bspump.filter.ContentFilter, query={"foo": {"$in": None}})
| 21.541667 | 83 | 0.672469 |
d8216b13d65dd42034d3a914fed0bb05d09b63ab | 387 | py | Python | Breeze/migrations/0027_remove_events_temp.py | Breeze19/breeze-backend | e1c2edb29e085edc1fddd119948faefc6acf4ad7 | [
"MIT"
] | 2 | 2018-12-20T13:36:31.000Z | 2018-12-21T19:02:04.000Z | Breeze/migrations/0027_remove_events_temp.py | Breeze19/breeze-backend | e1c2edb29e085edc1fddd119948faefc6acf4ad7 | [
"MIT"
] | null | null | null | Breeze/migrations/0027_remove_events_temp.py | Breeze19/breeze-backend | e1c2edb29e085edc1fddd119948faefc6acf4ad7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2019-01-04 11:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Breeze', '0026_auto_20190104_1724'),
]
operations = [
migrations.RemoveField(
model_name='events',
name='temp',
),
]
| 19.35 | 46 | 0.609819 |
09660a13f98a6d7417d0618b518f6abfb6641776 | 2,498 | py | Python | tests/rp_log.py | aguilajesus/dtformats | 91030c4478cb9089e85a52be57ae0b251be6c78f | [
"Apache-2.0"
] | 2 | 2019-07-30T18:52:08.000Z | 2019-08-01T02:22:57.000Z | tests/rp_log.py | aguilajesus/dtformats | 91030c4478cb9089e85a52be57ae0b251be6c78f | [
"Apache-2.0"
] | null | null | null | tests/rp_log.py | aguilajesus/dtformats | 91030c4478cb9089e85a52be57ae0b251be6c78f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for Windows Restore Point rp.log files."""
from __future__ import unicode_literals
import unittest
from dtformats import rp_log
from tests import test_lib
class RestorePointLogFileTest(test_lib.BaseTestCase):
"""Windows Restore Point rp.log file tests."""
# pylint: disable=protected-access
def testDebugPrintFileFooter(self):
"""Tests the _DebugPrintFileFooter function."""
output_writer = test_lib.TestOutputWriter()
test_file = rp_log.RestorePointLogFile(output_writer=output_writer)
data_type_map = test_file._GetDataTypeMap('rp_log_file_footer')
file_footer = data_type_map.CreateStructureValues(
creation_time=1)
test_file._DebugPrintFileFooter(file_footer)
def testDebugPrintFileHeader(self):
"""Tests the _DebugPrintFileHeader function."""
output_writer = test_lib.TestOutputWriter()
test_file = rp_log.RestorePointLogFile(output_writer=output_writer)
data_type_map = test_file._GetDataTypeMap('rp_log_file_header')
file_header = data_type_map.CreateStructureValues(
description='Description'.encode('utf-16-le'),
event_type=1,
restore_point_type=2,
sequence_number=3)
test_file._DebugPrintFileHeader(file_header)
@test_lib.skipUnlessHasTestFile(['rp.log'])
def testReadFileFooter(self):
"""Tests the _ReadFileFooter function."""
output_writer = test_lib.TestOutputWriter()
test_file = rp_log.RestorePointLogFile(output_writer=output_writer)
test_file_path = self._GetTestFilePath(['rp.log'])
with open(test_file_path, 'rb') as file_object:
test_file._file_size = 536
test_file._ReadFileFooter(file_object)
@test_lib.skipUnlessHasTestFile(['rp.log'])
def testReadFileHeader(self):
"""Tests the _ReadFileHeader function."""
output_writer = test_lib.TestOutputWriter()
test_file = rp_log.RestorePointLogFile(output_writer=output_writer)
test_file_path = self._GetTestFilePath(['rp.log'])
with open(test_file_path, 'rb') as file_object:
test_file._ReadFileHeader(file_object)
@test_lib.skipUnlessHasTestFile(['rp.log'])
def testReadFileObject(self):
"""Tests the ReadFileObject function."""
output_writer = test_lib.TestOutputWriter()
test_file = rp_log.RestorePointLogFile(
debug=True, output_writer=output_writer)
test_file_path = self._GetTestFilePath(['rp.log'])
test_file.Open(test_file_path)
if __name__ == '__main__':
unittest.main()
| 31.620253 | 71 | 0.748599 |
77561fce12a288612b57c85b23f10f23184e525b | 2,661 | py | Python | qtc_color_themes/solarizedlight.py | romanrue/qtc-color-themes | 58f7ab3209cb25ec7ac641a8ade35dfdcd524f14 | [
"MIT"
] | null | null | null | qtc_color_themes/solarizedlight.py | romanrue/qtc-color-themes | 58f7ab3209cb25ec7ac641a8ade35dfdcd524f14 | [
"MIT"
] | null | null | null | qtc_color_themes/solarizedlight.py | romanrue/qtc-color-themes | 58f7ab3209cb25ec7ac641a8ade35dfdcd524f14 | [
"MIT"
] | null | null | null | """
A Pygments_ style based on the light background variant of Solarized_.
.. _Pygments: http://pygments.org/
.. _Solarized: http://ethanschoonover.com/solarized
"""
from pygments.style import Style
from pygments.token import Token, Comment, Name, Keyword, Generic, Number, Operator, String
BASE03 = '#002B36'
BASE02 = '#073642'
BASE01 = '#586E75'
BASE00 = '#657B83'
BASE0 = '#839496'
BASE1 = '#93A1A1'
BASE2 = '#EEE8D5'
BASE3 = '#FDF6E3'
YELLOW = '#B58900'
ORANGE = '#CB4B16'
RED = '#DC322F'
MAGENTA = '#D33682'
VIOLET = '#6C71C4'
BLUE = '#268BD2'
CYAN = '#2AA198'
GREEN = '#859900'
TEMP03=BASE03
TEMP02=BASE02
TEMP01=BASE01
TEMP00=BASE00
BASE03=BASE3
BASE02=BASE2
BASE01=BASE1
BASE00=BASE0
BASE0=TEMP00
BASE1=TEMP01
BASE2=TEMP02
BASE3=TEMP03
class SolarizedlightStyle(Style):
color = BASE0
background_color = BASE03
highlight_color = BASE01
styles = {
Keyword: GREEN,
Keyword.Constant: ORANGE,
Keyword.Declaration: BLUE,
#Keyword.Namespace
#Keyword.Pseudo
Keyword.Reserved: BLUE,
Keyword.Type: RED,
#Name
Name.Attribute: BASE1,
Name.Builtin: YELLOW,
Name.Builtin.Pseudo: BLUE,
Name.Class: BLUE,
Name.Constant: ORANGE,
Name.Decorator: BLUE,
Name.Entity: ORANGE,
Name.Exception: ORANGE,
Name.Function: BLUE,
#Name.Label
#Name.Namespace
#Name.Other
Name.Tag: BLUE,
Name.Variable: BLUE,
#Name.Variable.Class
#Name.Variable.Global
#Name.Variable.Instance
#Literal
#Literal.Date
String: CYAN,
String.Backtick: BASE01,
String.Char: CYAN,
String.Doc: CYAN,
#String.Double
String.Escape: ORANGE,
String.Heredoc: BASE1,
#String.Interpol
#String.Other
String.Regex: RED,
#String.Single
#String.Symbol
Number: CYAN,
#Number.Float
#Number.Hex
#Number.Integer
#Number.Integer.Long
#Number.Oct
Operator: GREEN,
#Operator.Word
#Punctuation: ORANGE,
Comment: BASE01,
#Comment.Multiline
Comment.Preproc: GREEN,
#Comment.Single
Comment.Special: GREEN,
#Generic
Generic.Deleted: CYAN,
Generic.Emph: 'italic',
Generic.Error: RED,
Generic.Heading: ORANGE,
Generic.Inserted: GREEN,
#Generic.Output
#Generic.Prompt
Generic.Strong: 'bold',
Generic.Subheading: ORANGE,
#Generic.Traceback
Token: BASE1,
Token.Other: ORANGE,
}
| 22.175 | 91 | 0.601654 |
3bcb37edb64c9072b078c9151694bda0e31d1ee7 | 1,118 | py | Python | verify.py | kartik-joshi/Adversarial-Attack-on-Recurrent-Neural-Network | 2504e35695c4f9305eed35468e4defa881d16c44 | [
"BSD-2-Clause"
] | 24 | 2018-10-12T12:07:42.000Z | 2022-03-18T22:34:45.000Z | verify.py | kartik-joshi/Adversarial-Attack-on-Recurrent-Neural-Network | 2504e35695c4f9305eed35468e4defa881d16c44 | [
"BSD-2-Clause"
] | 2 | 2019-06-14T00:20:22.000Z | 2020-10-28T19:02:56.000Z | verify.py | kartik-joshi/Adversarial-Attack-on-Recurrent-Neural-Network | 2504e35695c4f9305eed35468e4defa881d16c44 | [
"BSD-2-Clause"
] | 5 | 2018-10-12T12:07:43.000Z | 2021-11-12T13:23:11.000Z | ## verify.py -- check the accuracy of a neural network
##
## Copyright (C) 2016, Nicholas Carlini <nicholas@carlini.com>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
from setup_cifar import CIFAR, CIFARModel
from setup_mnist import MNIST, MNISTModel
from setup_inception import ImageNet, InceptionModel
import tensorflow as tf
import numpy as np
BATCH_SIZE = 1
with tf.Session() as sess:
data, model = MNIST(), MNISTModel("models/mnist", sess)
data, model = CIFAR(), CIFARModel("models/cifar", sess)
data, model = ImageNet(), InceptionModel(sess)
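    # Note: each of the three assignments above overwrites the previous one, so
    # only the ImageNet/Inception pair is evaluated as written; comment out the
    # dataset/model pairs that should not be checked.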
x = tf.placeholder(tf.float32, (None, model.image_size, model.image_size, model.num_channels))
y = model.predict(x)
r = []
for i in range(0,len(data.test_data),BATCH_SIZE):
pred = sess.run(y, {x: data.test_data[i:i+BATCH_SIZE]})
#print(pred)
#print('real',data.test_labels[i],'pred',np.argmax(pred))
r.append(np.argmax(pred,1) == np.argmax(data.test_labels[i:i+BATCH_SIZE],1))
print(np.mean(r))
| 34.9375 | 99 | 0.673524 |
60d8f57cb086d4a3eed093c23d7d290bdab198a5 | 2,498 | py | Python | 0601-0700/0641-Design Circular Deque/0641-Design Circular Deque.py | jiadaizhao/LeetCode | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | [
"MIT"
] | 49 | 2018-05-05T02:53:10.000Z | 2022-03-30T12:08:09.000Z | 0601-0700/0641-Design Circular Deque/0641-Design Circular Deque.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 11 | 2017-12-15T22:31:44.000Z | 2020-10-02T12:42:49.000Z | 0601-0700/0641-Design Circular Deque/0641-Design Circular Deque.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 28 | 2017-12-05T10:56:51.000Z | 2022-01-26T18:18:27.000Z | class MyCircularDeque:
def __init__(self, k: int):
"""
Initialize your data structure here. Set the size of the deque to be k.
"""
self.data = [0] * k
self.tail = -1
self.head = self.size = 0
def insertFront(self, value: int) -> bool:
"""
Adds an item at the front of Deque. Return true if the operation is successful.
"""
if self.isFull():
return False
self.head = (self.head - 1) % len(self.data)
self.data[self.head] = value
self.size += 1
return True
def insertLast(self, value: int) -> bool:
"""
Adds an item at the rear of Deque. Return true if the operation is successful.
"""
if self.isFull():
return False
self.tail = (self.tail + 1) % len(self.data)
self.data[self.tail] = value
self.size += 1
return True
def deleteFront(self) -> bool:
"""
Deletes an item from the front of Deque. Return true if the operation is successful.
"""
if self.isEmpty():
return False
self.head = (self.head + 1) % len(self.data)
self.size -= 1
return True
def deleteLast(self) -> bool:
"""
Deletes an item from the rear of Deque. Return true if the operation is successful.
"""
if self.isEmpty():
return False
self.tail = (self.tail - 1) % len(self.data)
self.size -= 1
return True
def getFront(self) -> int:
"""
Get the front item from the deque.
"""
return -1 if self.isEmpty() else self.data[self.head]
def getRear(self) -> int:
"""
Get the last item from the deque.
"""
return -1 if self.isEmpty() else self.data[self.tail]
def isEmpty(self) -> bool:
"""
Checks whether the circular deque is empty or not.
"""
return self.size == 0
def isFull(self) -> bool:
"""
Checks whether the circular deque is full or not.
"""
return self.size == len(self.data)
# Your MyCircularDeque object will be instantiated and called as such:
# obj = MyCircularDeque(k)
# param_1 = obj.insertFront(value)
# param_2 = obj.insertLast(value)
# param_3 = obj.deleteFront()
# param_4 = obj.deleteLast()
# param_5 = obj.getFront()
# param_6 = obj.getRear()
# param_7 = obj.isEmpty()
# param_8 = obj.isFull()
| 27.755556 | 92 | 0.551641 |
941b3207962451cb3aaaa13cc75012290fdd3b72 | 7,072 | py | Python | apps/filebrowser/src/filebrowser/lib/archives.py | t3hi3x/hue | 36d71c1a8dd978b899ef2dc3eef8887b68fd99a8 | [
"Apache-2.0"
] | 11 | 2019-03-20T07:38:35.000Z | 2021-06-18T09:42:46.000Z | apps/filebrowser/src/filebrowser/lib/archives.py | t3hi3x/hue | 36d71c1a8dd978b899ef2dc3eef8887b68fd99a8 | [
"Apache-2.0"
] | null | null | null | apps/filebrowser/src/filebrowser/lib/archives.py | t3hi3x/hue | 36d71c1a8dd978b899ef2dc3eef8887b68fd99a8 | [
"Apache-2.0"
] | 5 | 2019-06-29T03:13:02.000Z | 2020-04-23T04:47:11.000Z | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utilities for dealing with file modes.
import bz2
import os
import posixpath
import tarfile
import tempfile
from desktop.lib.exceptions_renderable import PopupException
from django.utils.translation import ugettext as _
from filebrowser.conf import ARCHIVE_UPLOAD_TEMPDIR
from zipfile import ZipFile
__all__ = ['archive_factory']
class Archive(object):
"""
  Archive interface.
"""
def extract(self, path):
"""
Extract an Archive.
Should return a directory where the extracted contents live.
"""
    raise NotImplementedError(_("Must implement 'extract' method."))
def _create_dirs(self, basepath, dirs=[]):
"""
Creates all directories passed at the given basepath.
"""
for directory in dirs:
# Stops if directory start with '/' or points to a relative path
if os.path.isabs(directory) or '..' in directory:
raise IllegalPathException()
directory = os.path.join(basepath, directory)
try:
os.makedirs(directory)
except OSError:
pass
class ZipArchive(Archive):
"""
Acts on a zip file in memory or in a temporary location.
Python's ZipFile class inherently buffers all reading.
"""
def __init__(self, file):
self.file = isinstance(file, basestring) and open(file) or file
self.zfh = ZipFile(self.file)
def extract(self):
"""
Extracts a zip file.
If a 'file' ends with '/', then it is a directory and we must create it.
    Else, open a file for writing and pipe the contents of the zipfile member into the new file.
"""
# Store all extracted files in a temporary directory.
if ARCHIVE_UPLOAD_TEMPDIR.get():
directory = tempfile.mkdtemp(dir=ARCHIVE_UPLOAD_TEMPDIR.get())
else:
directory = tempfile.mkdtemp()
dirs, files = self._filenames()
self._create_dirs(directory, dirs)
self._create_files(directory, files)
return directory
def _filenames(self):
"""
List all dirs and files by reading the table of contents of the Zipfile.
"""
dirs = []
files = []
for name in self.zfh.namelist():
if name.endswith(posixpath.sep):
dirs.append(name)
else:
files.append(name)
return (dirs, files)
def _create_files(self, basepath, files=[]):
"""
Extract files to their rightful place.
Files are written to a temporary directory immediately after being decompressed.
"""
for f in files:
new_path = os.path.join(basepath, f)
new_file = open(new_path, 'w')
new_file.write(self.zfh.read(f))
new_file.close()
class TarballArchive(Archive):
"""
Acts on a tarball (tar.gz) file in memory or in a temporary location.
  Python's TarFile class inherently buffers all reading.
"""
def __init__(self, file):
if isinstance(file, basestring):
self.path = file
else:
f = tempfile.NamedTemporaryFile(delete=False)
f.write(file.read())
self.path = f.name
f.close()
self.fh = tarfile.open(self.path)
def extract(self):
"""
    Extracts a tarball.
    If a 'file' ends with '/', then it is a directory and we must create it.
    Else, open a file for writing and pipe the contents of the tarball member into the new file.
"""
# Store all extracted files in a temporary directory.
directory = tempfile.mkdtemp()
dirs, files = self._filenames()
self._create_dirs(directory, dirs)
self._create_files(directory, files)
return directory
def _filenames(self):
"""
    List all dirs and files by reading the table of contents of the tarball.
"""
dirs = []
files = []
for tarinfo in self.fh.getmembers():
if tarinfo.isdir():
dirs.append(tarinfo.name)
else:
files.append(tarinfo.name)
parent = os.path.dirname(tarinfo.path)
# getmembers() sometimes doesn't return all the directories
# Go up the path one directory at the time
while parent != '' and parent not in dirs:
dirs.append(parent)
parent = os.path.dirname(parent)
return (dirs, files)
def _create_files(self, basepath, files=[]):
"""
Extract files to their rightful place.
Files are written to a temporary directory immediately after being decompressed.
"""
for f in files:
new_path = os.path.join(basepath, f)
new_file = open(new_path, 'w')
new_file.write(self.fh.extractfile(f).read())
new_file.close()
class BZ2Archive(Archive):
"""
Acts on a bzip2 file in memory or in a temporary location.
Python's BZ2File class inherently buffers all reading.
"""
def __init__(self, file):
# bzip2 only compresses single files and there is no direct method in the bz2 library to get the file name
self.name = file.name[:-6] if file.name.lower().endswith('.bzip2') else file.name[:-4]
if isinstance(file, basestring):
self.path = file
else:
f = tempfile.NamedTemporaryFile(delete=False)
f.write(file.read())
self.path = f.name
f.close()
self.fh = bz2.BZ2File(self.path)
def extract(self):
"""
Extracts a bz2 file.
Opens the file for writing and meta pipe the contents bz2file to the new file.
"""
# Store all extracted files in a temporary directory.
if ARCHIVE_UPLOAD_TEMPDIR.get():
directory = tempfile.mkdtemp(dir=ARCHIVE_UPLOAD_TEMPDIR.get())
else:
directory = tempfile.mkdtemp()
files = [self.name]
self._create_files(directory, files)
return directory
def _create_files(self, basepath, files=[]):
"""
Files are written to a temporary directory immediately after being decompressed.
"""
for f in files:
new_path = os.path.join(basepath, f)
new_file = open(new_path, 'w')
new_file.write(self.fh.read())
new_file.close()
def archive_factory(path, archive_type='zip'):
if archive_type == 'zip':
return ZipArchive(path)
elif archive_type == 'tarball' or archive_type == 'tar.gz' or archive_type == 'tgz':
return TarballArchive(path)
elif archive_type == 'bz2' or archive_type == 'bzip2':
return BZ2Archive(path)
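# Illustrative usage (hypothetical path), e.g. when handling an uploaded archive:
#   archive = archive_factory("/tmp/upload.tar.gz", archive_type="tar.gz")
#   extracted_dir = archive.extract()  # temporary directory holding the contents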
class IllegalPathException(PopupException):
def __init__(self):
super(IllegalPathException, self).__init__('''Archive path cannot be absolute or contain '..' ''')
| 30.351931 | 110 | 0.677885 |
56feb574844e0cd31af653ba32c0c18bee76b10d | 521 | py | Python | mycms/user/views.py | Anioko/reusable-cms | 52e2a2f11a92c596bd13812d5fd14dffdcdcaa7f | [
"MIT"
] | null | null | null | mycms/user/views.py | Anioko/reusable-cms | 52e2a2f11a92c596bd13812d5fd14dffdcdcaa7f | [
"MIT"
] | 122 | 2020-12-31T06:31:11.000Z | 2022-03-18T14:12:03.000Z | mycms___/webpack/user/views.py | Anioko/reusable-cms | 52e2a2f11a92c596bd13812d5fd14dffdcdcaa7f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""User views."""
from flask import Blueprint, render_template, redirect, url_for
from flask_login import login_required, logout_user, current_user
blueprint = Blueprint("user", __name__, url_prefix="/users", static_folder="../static")
@blueprint.route("/")
@login_required
def members():
"""List members."""
return render_template("users/members.html")
@login_required
@blueprint.route("/logout")
def logout_of_system():
logout_user()
return redirect(url_for("public.home"))
| 24.809524 | 87 | 0.721689 |
febd1d77dc240316770f86d010d4ebecc6ed62d3 | 33,868 | py | Python | jcvi/formats/base.py | ChenSh1ne/jcvi | c71a47fa3602905cdc8468ba1c011fe2028686fa | [
"BSD-2-Clause"
] | null | null | null | jcvi/formats/base.py | ChenSh1ne/jcvi | c71a47fa3602905cdc8468ba1c011fe2028686fa | [
"BSD-2-Clause"
] | null | null | null | jcvi/formats/base.py | ChenSh1ne/jcvi | c71a47fa3602905cdc8468ba1c011fe2028686fa | [
"BSD-2-Clause"
] | 1 | 2020-11-16T19:25:30.000Z | 2020-11-16T19:25:30.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import os
import os.path as op
import math
import sys
import logging
from itertools import groupby, islice, cycle
from Bio import SeqIO
from jcvi.apps.base import (
OptionParser,
ActionDispatcher,
sh,
debug,
need_update,
mkdir,
popen,
)
debug()
FastaExt = ("fasta", "fa", "fna", "cds", "pep", "faa", "fsa", "seq", "nt", "aa")
FastqExt = ("fastq", "fq")
class BaseFile(object):
def __init__(self, filename):
self.filename = filename
if filename:
logging.debug("Load file `{0}`".format(filename))
class LineFile(BaseFile, list):
"""
Generic file parser for line-based files
"""
def __init__(self, filename, comment=None, load=False):
super(LineFile, self).__init__(filename)
if load:
fp = must_open(filename)
self.lines = [l.strip() for l in fp if l[0] != comment]
logging.debug(
"Load {0} lines from `{1}`.".format(len(self.lines), filename)
)
class DictFile(BaseFile, dict):
"""
Generic file parser for multi-column files, keyed by a particular index.
"""
def __init__(
self,
filename,
keypos=0,
valuepos=1,
delimiter=None,
strict=True,
keycast=None,
cast=None,
):
super(DictFile, self).__init__(filename)
self.keypos = keypos
fp = must_open(filename)
ncols = (max(keypos, valuepos) if valuepos else keypos) + 1
thiscols = 0
for lineno, row in enumerate(fp):
row = row.rstrip()
atoms = row.split(delimiter)
thiscols = len(atoms)
if thiscols < ncols:
action = "Aborted" if strict else "Skipped"
msg = "Must contain >= {0} columns. {1}.\n".format(ncols, action)
msg += " --> Line {0}: {1}".format(lineno + 1, row)
logging.error(msg)
if strict:
sys.exit(1)
else:
continue
key = atoms[keypos]
value = atoms[valuepos] if (valuepos is not None) else atoms
if keycast:
key = keycast(key)
if cast:
value = cast(value)
self[key] = value
assert thiscols, "File empty"
self.ncols = thiscols
logging.debug("Imported {0} records from `{1}`.".format(len(self), filename))
class SetFile(BaseFile, set):
def __init__(self, filename, column=-1, delimiter=None):
super(SetFile, self).__init__(filename)
fp = open(filename)
for row in fp:
if not row.strip():
continue
keys = [x.strip() for x in row.split(delimiter)]
if column >= 0:
keys = [keys[column]]
self.update(keys)
class FileShredder(object):
"""
Same as rm -f *
"""
def __init__(self, filelist, verbose=True):
filelist = [x for x in filelist if x and op.exists(x)]
cmd = "rm -rf {0}".format(" ".join(filelist))
sh(cmd, log=verbose)
class FileMerger(object):
"""
Same as cat * > filename
"""
def __init__(self, filelist, outfile):
self.filelist = filelist
self.outfile = outfile
self.ingz = filelist[0].endswith(".gz")
self.outgz = outfile.endswith(".gz")
def merge(self, checkexists=False):
outfile = self.outfile
if checkexists and not need_update(self.filelist, outfile):
logging.debug("File `{0}` exists. Merge skipped.".format(outfile))
return
files = " ".join(self.filelist)
ingz, outgz = self.ingz, self.outgz
if ingz and outgz: # can merge gz files directly
cmd = "cat {0} > {1}".format(files, outfile)
sh(cmd)
else:
cmd = "zcat" if self.ingz else "cat"
cmd += " " + files
sh(cmd, outfile=outfile)
return outfile
class FileSplitter(object):
def __init__(self, filename, outputdir=None, format="fasta", mode="cycle"):
self.filename = filename
self.outputdir = outputdir
self.mode = mode
format = format or self._guess_format(filename)
logging.debug("format is %s" % format)
if format in ("fasta", "fastq"):
self.klass = "seqio"
elif format == "clust":
self.klass = "clust"
else:
self.klass = "txt"
self.format = format
mkdir(outputdir)
def _open(self, filename):
if self.klass == "seqio":
handle = SeqIO.parse(open(filename), self.format)
elif self.klass == "clust":
from jcvi.apps.uclust import ClustFile
handle = iter(ClustFile(filename))
else:
handle = open(filename)
return handle
@property
def num_records(self):
handle = self._open(self.filename)
return sum(1 for x in handle)
def _guess_format(self, filename):
root, ext = op.splitext(filename)
ext = ext.strip(".")
if ext in FastaExt:
format = "fasta"
elif ext in FastqExt:
format = "fastq"
else:
format = "txt"
return format
def _batch_iterator(self, N=1):
"""Returns N lists of records.
This can be used on any iterator, for example to batch up
SeqRecord objects from Bio.SeqIO.parse(...), or to batch
Alignment objects from Bio.AlignIO.parse(...), or simply
lines from a file handle.
This is a generator function, and it returns lists of the
entries from the supplied iterator. Each list will have
batch_size entries, although the final list may be shorter.
"""
batch_size = math.ceil(self.num_records / float(N))
handle = self._open(self.filename)
while True:
batch = list(islice(handle, batch_size))
if not batch:
break
yield batch
@classmethod
def get_names(cls, filename, N):
root, ext = op.splitext(op.basename(filename))
names = []
pad0 = len(str(int(N - 1)))
for i in range(N):
name = "{0}_{1:0{2}d}{3}".format(root, i, pad0, ext)
names.append(name)
return names
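    # For example, get_names("input.fasta", 3) returns
    # ["input_0.fasta", "input_1.fasta", "input_2.fasta"].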
def write(self, fw, batch):
if self.klass == "seqio":
SeqIO.write(batch, fw, self.format)
elif self.klass == "clust":
for b in batch:
print(b, file=fw)
else:
for line in batch:
fw.write(line)
return len(batch)
def split(self, N, force=False):
"""
        There are three modes of splitting the records
        - batch: chunk records sequentially, roughly records/N per chunk
        - cycle: place each record into the split files in round-robin fashion
        - optimal: balance the total size of each split file (LPT heuristic)
        use `cycle` or `optimal` if the record lengths are not evenly distributed
"""
mode = self.mode
assert mode in ("batch", "cycle", "optimal")
logging.debug("set split mode=%s" % mode)
self.names = self.__class__.get_names(self.filename, N)
if self.outputdir:
self.names = [op.join(self.outputdir, x) for x in self.names]
if not need_update(self.filename, self.names) and not force:
logging.error(
"file %s already existed, skip file splitting" % self.names[0]
)
return
filehandles = [open(x, "w") for x in self.names]
if mode == "batch":
for batch, fw in zip(self._batch_iterator(N), filehandles):
count = self.write(fw, batch)
logging.debug("write %d records to %s" % (count, fw.name))
elif mode == "cycle":
handle = self._open(self.filename)
for record, fw in zip(handle, cycle(filehandles)):
count = self.write(fw, [record])
elif mode == "optimal":
"""
This mode is based on Longest Processing Time (LPT) algorithm:
A simple, often-used algorithm is the LPT algorithm (Longest
Processing Time) which sorts the jobs by its processing time and
then assigns them to the machine with the earliest end time so far.
This algorithm achieves an upper bound of 4/3 - 1/(3m) OPT.
Citation: <http://en.wikipedia.org/wiki/Multiprocessor_scheduling>
"""
endtime = [0] * N
handle = self._open(self.filename)
for record in handle:
mt, mi = min((x, i) for (i, x) in enumerate(endtime))
fw = filehandles[mi]
count = self.write(fw, [record])
endtime[mi] += len(record)
for fw in filehandles:
fw.close()
def longest_unique_prefix(query, targets, remove_self=True):
"""
Find the longest unique prefix for filename, when compared against a list of
filenames. Useful to simplify file names in a pool of files. See usage in
formats.fasta.pool().
"""
query = op.basename(query)
targets = [op.basename(x) for x in targets]
prefix_lengths = [len(op.commonprefix([query, name])) for name in targets]
if remove_self and len(query) in prefix_lengths:
prefix_lengths.remove(len(query))
longest_length = max(prefix_lengths)
return query[: longest_length + 1]
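# For example (hypothetical names), longest_unique_prefix("apple.fa", ["apricot.fa", "banana.fa"])
# returns "app" -- the shortest prefix of "apple.fa" not shared with any other name.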
def check_exists(filename, oappend=False):
"""
Avoid overwriting some files accidentally.
"""
if op.exists(filename):
if oappend:
return oappend
logging.error("`{0}` found, overwrite (Y/N)?".format(filename))
overwrite = input() == "Y"
else:
overwrite = True
return overwrite
def timestamp():
from datetime import datetime as dt
return "{0}{1:02d}{2:02d}".format(dt.now().year, dt.now().month, dt.now().day)
def must_open(filename, mode="r", checkexists=False, skipcheck=False, oappend=False):
"""
Accepts filename and returns filehandle.
Checks on multiple files, stdin/stdout/stderr, .gz or .bz2 file.
"""
if isinstance(filename, list):
assert "r" in mode
if filename[0].endswith((".gz", ".bz2")):
filename = " ".join(filename) # allow opening multiple gz/bz2 files
else:
import fileinput
return fileinput.input(filename)
if filename.startswith("s3://"):
from jcvi.utils.aws import pull_from_s3
filename = pull_from_s3(filename)
if filename in ("-", "stdin"):
assert "r" in mode
fp = sys.stdin
elif filename == "stdout":
assert "w" in mode
fp = sys.stdout
elif filename == "stderr":
assert "w" in mode
fp = sys.stderr
elif filename == "tmp" and mode == "w":
from tempfile import NamedTemporaryFile
fp = NamedTemporaryFile(delete=False)
elif filename.endswith(".gz"):
import gzip
if "r" in mode:
fp = gzip.open(filename, mode + "t")
elif "w" in mode:
fp = gzip.open(filename, mode)
elif filename.endswith(".bz2"):
if "r" in mode:
cmd = "bzcat {0}".format(filename)
fp = popen(cmd, debug=False)
elif "w" in mode:
import bz2
fp = bz2.BZ2File(filename, mode)
else:
if checkexists:
assert mode == "w"
overwrite = (
(not op.exists(filename))
if skipcheck
else check_exists(filename, oappend)
)
if overwrite:
if oappend:
fp = open(filename, "a")
else:
fp = open(filename, "w")
else:
logging.debug("File `{0}` already exists. Skipped.".format(filename))
return None
else:
fp = open(filename, mode)
return fp
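# Illustrative usage (hypothetical file names):
#   fp = must_open("reads.fastq.gz")  # transparently opens gzip input in text mode
#   fw = must_open("out.txt", "w", checkexists=True)  # prompts before overwriting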
bash_shebang = "#!/bin/bash"
python_shebang = """#!/usr/bin/env python
# -*- coding: UTF-8 -*-"""
def write_file(filename, contents, meta=None, skipcheck=False, append=False, tee=False):
if not meta:
suffix = filename.rsplit(".", 1)[-1]
if suffix == "sh":
meta = "run script"
elif suffix == "py":
meta = "python script"
else:
meta = "file"
meta_choices = ("file", "run script", "python script")
assert meta in meta_choices, "meta must be one of {0}".format(
"|".join(meta_choices)
)
contents = contents.strip()
shebang = "\n"
if "script" in meta:
if not append:
if meta == "run script":
shebang = bash_shebang
elif meta == "python script":
shebang = python_shebang
contents = "\n\n".join((shebang, contents))
fw = must_open(filename, "w", checkexists=True, skipcheck=skipcheck, oappend=append)
if fw:
print(contents, file=fw)
fw.close()
if tee:
print(contents, file=sys.stderr)
fileop = "appended" if append else "written"
message = "{0} {1} to `{2}`.".format(meta, fileop, filename)
logging.debug(message.capitalize())
if meta == "run script" and not append:
sh("chmod u+x {0}".format(filename))
def read_until(handle, start):
# read each line until a certain start, then puts the start tag back
while 1:
pos = handle.tell()
line = handle.readline()
if not line:
break
if line.startswith(start):
handle.seek(pos)
return
def read_block(handle, signal):
"""
    Useful for reading block-like file formats, for example FASTA or OBO files.
    Such files usually start with some signal, and the lines between consecutive
    signals form a record.
"""
signal_len = len(signal)
it = (
x[1]
for x in groupby(handle, key=lambda row: row.strip()[:signal_len] == signal)
)
found_signal = False
for header in it:
header = list(header)
for h in header[:-1]:
h = h.strip()
if h[:signal_len] != signal:
continue
yield h, [] # Header only, no contents
header = header[-1].strip()
if header[:signal_len] != signal:
continue
found_signal = True
seq = list(s.strip() for s in next(it))
yield header, seq
if not found_signal:
handle.seek(0)
seq = list(s.strip() for s in handle)
yield None, seq
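# Illustrative usage (hypothetical file): iterating FASTA records block by block:
#   for header, seq in read_block(must_open("seqs.fasta"), ">"):
#       ...  # header is the ">..." line, seq is the list of sequence lines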
def is_number(s, cast=float):
"""
Check if a string is a number. Use cast=int to check if s is an integer.
"""
try:
cast(s) # for int, long and float
except ValueError:
return False
return True
def get_number(s, cast=int):
"""
Try to get a number out of a string, and cast it.
"""
import string
d = "".join(x for x in str(s) if x in string.digits)
return cast(d) if d else s
def flexible_cast(s):
if is_number(s, cast=int):
return int(s)
elif is_number(s, cast=float):
return float(s)
return s
def main():
actions = (
("pairwise", "convert a list of IDs into all pairs"),
("split", "split large file into N chunks"),
("reorder", "reorder columns in tab-delimited files"),
("flatten", "convert a list of IDs into one per line"),
("unflatten", "convert lines to a list of IDs on single line"),
("group", "group elements in a table based on key (groupby) column"),
("setop", "set operations on files"),
("join", "join tabular-like files based on common column"),
("subset", "subset tabular-like files based on common column"),
("truncate", "remove lines from end of file"),
("append", "append a column with fixed value"),
("seqids", "make a list of seqids for graphics.karyotype"),
("mergecsv", "merge a set of tsv files"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def seqids(args):
"""
%prog seqids prefix start end
Make a list of seqids for graphics.karyotype. For example:
$ python -m jcvi.formats.base seqids chromosome_ 1 3
chromosome_1,chromosome_2,chromosome_3
$ python -m jcvi.formats.base seqids A 3 1 --pad0=2
A03,A02,A01
"""
p = OptionParser(seqids.__doc__)
p.add_option("--pad0", default=0, help="How many zeros to pad")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
prefix, start, end = args
pad0 = opts.pad0
start, end = int(start), int(end)
step = 1 if start <= end else -1
print(
",".join(
[
"{}{:0{}d}".format(prefix, x, pad0)
for x in range(start, end + step, step)
]
)
)
def pairwise(args):
"""
%prog pairwise ids
Convert a list of IDs into all pairs.
"""
from itertools import combinations
p = OptionParser(pairwise.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(idsfile,) = args
ids = SetFile(idsfile)
ids = sorted(ids)
fw = open(idsfile + ".pairs", "w")
for a, b in combinations(ids, 2):
print("\t".join((a, b)), file=fw)
fw.close()
def append(args):
"""
%prog append csvfile [tag]
Append a column with fixed value. If tag is missing then just append the
filename.
"""
p = OptionParser(append.__doc__)
p.set_sep()
p.set_outfile()
opts, args = p.parse_args(args)
nargs = len(args)
if nargs not in (1, 2):
sys.exit(not p.print_help())
csvfile = args[0]
tag = args[1] if nargs == 2 else csvfile
fp = must_open(csvfile)
fw = must_open(opts.outfile, "w")
for row in fp:
row = row.rstrip("\r\n")
row = opts.sep.join((row, tag))
print(row, file=fw)
def truncate(args):
"""
%prog truncate linecount filename
Remove linecount lines from the end of the file in-place. Borrowed from:
<http://superuser.com/questions/127786/how-to-remove-the-last-2-lines-of-a-very-large-file>
"""
p = OptionParser(truncate.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
number, filename = args
number = int(number)
count = 0
f = open(filename, "r+b")
f.seek(0, os.SEEK_END)
while f.tell() > 0:
f.seek(-1, os.SEEK_CUR)
char = f.read(1)
if char == "\n":
count += 1
if count == number + 1:
f.truncate()
print("Removed {0} lines from end of file".format(number), file=sys.stderr)
return number
f.seek(-1, os.SEEK_CUR)
if count < number + 1:
print("No change: requested removal would leave empty file", file=sys.stderr)
return -1
def flatten(args):
"""
%prog flatten filename > ids
Convert a list of IDs (say, multiple IDs per line) and move them into one
per line.
For example, convert this, to this:
A,B,C | A
1 | B
a,4 | C
| 1
| a
| 4
If multi-column file with multiple elements per column, zip then flatten like so:
A,B,C 2,10,gg | A,2
1,3 4 | B,10
| C,gg
| 1,4
| 3,na
"""
from six.moves import zip_longest
p = OptionParser(flatten.__doc__)
p.set_sep(sep=",")
p.add_option(
"--zipflatten",
default=None,
dest="zipsep",
help="Specify if columns of the file should be zipped before"
+ " flattening. If so, specify delimiter separating column elements",
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(tabfile,) = args
zipsep = opts.zipsep
fp = must_open(tabfile)
for row in fp:
if zipsep:
row = row.rstrip()
atoms = row.split(opts.sep)
frows = []
for atom in atoms:
frows.append(atom.split(zipsep))
print(
"\n".join(
[zipsep.join(x) for x in list(zip_longest(*frows, fillvalue="na"))]
)
)
else:
print(row.strip().replace(opts.sep, "\n"))
def unflatten(args):
"""
%prog unflatten idsfile > unflattened
Given a list of ids, one per line, unflatten the list onto a single line with sep.
"""
p = OptionParser(unflatten.__doc__)
p.add_option("--sep", default=",", help="Separator when joining ids")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(idsfile,) = args
ids = must_open(idsfile).read().split()
with must_open(opts.outfile, "w") as fw:
print(opts.sep.join(ids), file=fw)
def group(args):
"""
%prog group tabfile > tabfile.grouped
Given a tab-delimited file, either group all elements within the file or
group the elements in the value column(s) based on the key (groupby) column
For example, convert this | into this
---------------------------------------
a 2 3 4 | a,2,3,4,5,6
a 5 6 | b,7,8
b 7 8 | c,9,10,11
c 9 |
c 10 11 |
If grouping by a particular column,
convert this | into this:
---------------------------------------------
a 2 3 4 | a 2,5 3,6 4
a 5 6 | b 7 8
b 7 8 | c 9,10 11
c 9 |
c 10 11 |
By default, it uniqifies all the grouped elements
"""
from jcvi.utils.cbook import AutoVivification
from jcvi.utils.grouper import Grouper
p = OptionParser(group.__doc__)
p.set_sep()
p.add_option(
"--groupby", default=None, type="int", help="Default column to groupby",
)
p.add_option(
"--groupsep", default=",", help="Separator to join the grouped elements",
)
p.add_option(
"--nouniq",
default=False,
action="store_true",
help="Do not uniqify the grouped elements",
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(tabfile,) = args
sep = opts.sep
groupby = opts.groupby
groupsep = opts.groupsep
cols = []
grouper = AutoVivification() if groupby is not None else Grouper()
fp = must_open(tabfile)
for row in fp:
row = row.rstrip()
atoms = row.split(sep)
if groupby is not None:
if len(cols) < len(atoms):
cols = [x for x in range(len(atoms))]
if groupby not in cols:
logging.error("groupby col index `{0}` is out of range".format(groupby))
sys.exit()
key = atoms[groupby]
for col in cols:
if col == groupby:
continue
if not grouper[key][col]:
grouper[key][col] = [] if opts.nouniq else set()
if col < len(atoms):
if groupsep in atoms[col]:
for atom in atoms[col].split(groupsep):
if opts.nouniq:
grouper[key][col].append(atom)
else:
grouper[key][col].add(atom)
else:
if opts.nouniq:
grouper[key][col].append(atoms[col])
else:
grouper[key][col].add(atoms[col])
else:
grouper.join(*atoms)
for key in grouper:
if groupby is not None:
line = []
for col in cols:
if col == groupby:
line.append(key)
elif col in grouper[key].keys():
line.append(groupsep.join(grouper[key][col]))
else:
line.append("na")
print(sep.join(line))
else:
print(groupsep.join(key))
def reorder(args):
"""
%prog reorder tabfile 1,2,4,3 > newtabfile
Reorder columns in tab-delimited files. The above syntax will print out a
new file with col-1,2,4,3 from the old file.
"""
import csv
p = OptionParser(reorder.__doc__)
p.set_sep()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tabfile, order = args
sep = opts.sep
order = [int(x) - 1 for x in order.split(",")]
reader = csv.reader(must_open(tabfile), delimiter=sep)
writer = csv.writer(sys.stdout, delimiter=sep)
for row in reader:
newrow = [row[x] for x in order]
writer.writerow(newrow)
def split(args):
"""
%prog split file outdir N
Split file into N records. This allows splitting FASTA/FASTQ/TXT file
properly at boundary of records. Split is useful for parallelization
on input chunks.
Option --mode is useful on how to break into chunks.
    1. batch - chunk records sequentially, 1-100 in file 1, 101-200 in file 2, etc.
2. cycle - chunk records in Round Robin fashion
3. optimal - try to make split file of roughly similar sizes, using LPT
algorithm. This is the default.
"""
p = OptionParser(split.__doc__)
mode_choices = ("batch", "cycle", "optimal")
p.add_option(
"--all", default=False, action="store_true", help="split all records",
)
p.add_option(
"--mode",
default="optimal",
choices=mode_choices,
help="Mode when splitting records",
)
p.add_option(
"--format",
choices=("fasta", "fastq", "txt", "clust"),
help="input file format",
)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
filename, outdir, N = args
fs = FileSplitter(filename, outputdir=outdir, format=opts.format, mode=opts.mode)
if opts.all:
logging.debug("option -all override N")
N = fs.num_records
else:
N = min(fs.num_records, int(N))
assert N > 0, "N must be > 0"
logging.debug("split file into %d chunks" % N)
fs.split(N)
return fs
def join(args):
"""
%prog join file1.txt(pivotfile) file2.txt ..
Join tabular-like files based on common column.
--column specifies the column index to pivot on.
Use comma to separate multiple values if the pivot column is different
in each file. Maintain the order in the first file.
--sep specifies the column separators, default to tab.
Use comma to separate multiple values if the column separator is different
in each file.
"""
p = OptionParser(join.__doc__)
p.add_option(
"--column", default="0", help="0-based column id, multiple values allowed",
)
p.set_sep(multiple=True)
p.add_option(
"--noheader", default=False, action="store_true", help="Do not print header",
)
p.add_option("--na", default="na", help="Value for unjoined data")
p.add_option(
"--compact",
default=False,
action="store_true",
help="Do not repeat pivotal columns in output",
)
p.add_option(
"--keysep",
default=",",
help="specify separator joining multiple elements in the key column"
+ " of the pivot file",
)
p.set_outfile()
opts, args = p.parse_args(args)
nargs = len(args)
keysep = opts.keysep
compact = opts.compact
if len(args) < 2:
sys.exit(not p.print_help())
na = opts.na
c = opts.column
if "," in c:
cc = [int(x) for x in c.split(",")]
else:
cc = [int(c)] * nargs
assert len(cc) == nargs, "Column index number != File number"
s = opts.sep
if "," in s:
ss = [x for x in s.split(",")]
else:
ss = [s] * nargs
assert len(ss) == nargs, "column separator number != File number"
# Maintain the first file line order, and combine other files into it
pivotfile = args[0]
files = [
DictFile(f, keypos=c, valuepos=None, delimiter=s)
for f, c, s in zip(args, cc, ss)
]
otherfiles = files[1:]
# The header contains filenames
headers = []
for i, x in enumerate(files):
ncols = x.ncols
if i and compact:
ncols -= 1
headers += [op.basename(x.filename)] * ncols
header = "\t".join(headers)
fp = must_open(pivotfile)
fw = must_open(opts.outfile, "w")
if not opts.noheader:
print(header, file=fw)
for row in fp:
row = row.rstrip()
atoms = row.split(ss[0])
newrow = atoms
key = atoms[cc[0]]
keys = key.split(keysep) if keysep in key else [key]
for d in otherfiles:
drows = list()
for key in keys:
krow = d.get(key, [na] * d.ncols)
if compact:
krow.pop(d.keypos)
drows.append(krow)
drow = [keysep.join(x) for x in list(zip(*drows))]
newrow += drow
print("\t".join(newrow), file=fw)
def subset(args):
"""
%prog subset file1.txt(pivotfile) file2.txt ..
subset tabular-like file1 based on common column with file 2.
Normally file1 should have unique row entries.
If more than one file2 are provided, they must have same column separators.
Multiple file2's will be concatenated in the output.
--column specifies the column index (0-based) to pivot on.
Use comma to separate multiple values if the pivot column is different
in each file. Maintain the order in the first file.
--sep specifies the column separators, default to tab.
Use comma to separate multiple values if the column separator is different
in each file.
"""
p = OptionParser(subset.__doc__)
p.add_option(
"--column", default="0", help="0-based column id, multiple values allowed",
)
p.set_sep(multiple=True)
p.add_option(
"--pivot",
default=1,
type="int",
help="1 for using order in file1, 2 for using order in \
file2",
)
p.set_outfile()
opts, args = p.parse_args(args)
nargs = len(args)
if len(args) < 2:
sys.exit(not p.print_help())
c = opts.column
if "," in c:
cc = [int(x) for x in c.split(",")]
assert len(set(cc[1:])) == 1, "Multiple file2's must have same column index."
cc = cc[0:2]
else:
cc = [int(c)] * 2
s = opts.sep
if "," in s:
ss = [x for x in s.split(",")]
assert (
len(set(cc[1:])) == 1
), "Multiple file2's must have same column separator."
ss = ss[0:2]
else:
ss = [s] * 2
if nargs > 2:
file2 = FileMerger(args[1:], outfile="concatenatedFile2").merge()
else:
file2 = args[1]
newargs = [args[0], file2]
files = [
DictFile(f, keypos=c, valuepos=None, delimiter=s)
for f, c, s in zip(newargs, cc, ss)
]
pivot = 0 if opts.pivot == 1 else 1
fp = open(newargs[pivot])
fw = must_open(opts.outfile, "w")
for row in fp:
row = row.rstrip()
atoms = row.split(ss[pivot])
key = atoms[cc[pivot]]
d = files[1 - pivot]
if key in d:
print(ss[0].join(files[0][key]), file=fw)
if nargs > 2:
FileShredder([file2])
def setop(args):
"""
%prog setop "fileA & fileB" > newfile
    Perform set operations, except that the operands are files. The files (fileA and fileB) contain
list of ids. The operator is one of the four:
|: union (elements found in either file)
&: intersection (elements found in both)
-: difference (elements in fileA but not in fileB)
    ^: symmetric difference (elements found in either set but not both)
Please quote the argument to avoid shell interpreting | and &.
"""
from jcvi.utils.natsort import natsorted
p = OptionParser(setop.__doc__)
p.add_option(
"--column",
default=0,
type="int",
help="The column to extract, 0-based, -1 to disable",
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(statement,) = args
fa, op, fb = statement.split()
assert op in ("|", "&", "-", "^")
column = opts.column
fa = SetFile(fa, column=column)
fb = SetFile(fb, column=column)
if op == "|":
t = fa | fb
elif op == "&":
t = fa & fb
elif op == "-":
t = fa - fb
elif op == "^":
t = fa ^ fb
for x in natsorted(t):
print(x)
def mergecsv(args):
"""
%prog mergecsv *.tsv
Merge a set of tsv files.
"""
p = OptionParser(mergecsv.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
tsvfiles = args
outfile = opts.outfile
if op.exists(outfile):
os.remove(outfile)
tsvfile = tsvfiles[0]
fw = must_open(opts.outfile, "w")
for i, tsvfile in enumerate(tsvfiles):
fp = open(tsvfile)
if i > 0:
next(fp)
for row in fp:
fw.write(row)
fw.close()
if __name__ == "__main__":
main()
| 28.199833 | 95 | 0.549486 |
fd6bb3fe420f8919595ffe40d77a4f60e4394a0c | 1,433 | py | Python | cells/FS_BasketCell.py | joyabhatt/jb-inhibition | c521e69ff255063ded4fe9139f89c3531cf5f4fb | [
"MIT"
] | null | null | null | cells/FS_BasketCell.py | joyabhatt/jb-inhibition | c521e69ff255063ded4fe9139f89c3531cf5f4fb | [
"MIT"
] | null | null | null | cells/FS_BasketCell.py | joyabhatt/jb-inhibition | c521e69ff255063ded4fe9139f89c3531cf5f4fb | [
"MIT"
] | null | null | null | from neuron import h
from math import pi, exp
import numpy as np
###############################################################################
# Soma-targeting interneuron (fast-spiking Basket Cell -- Bas)
###############################################################################
class Bas:
"Basket cell"
def __init__ (self):
self.soma = soma = h.Section(name='soma',cell=self)
self.soma.insert('k_ion')
self.soma.insert('na_ion')
self.soma.ek = -90 # K+ current reversal potential (mV)
self.soma.ena = 60 # Na+ current reversal potential (mV)
self.soma.Ra=100
self.set_morphology()
self.set_conductances()
def set_morphology(self):
total_area = 10000 # um2
self.soma.nseg = 1
self.soma.cm = 1 # uF/cm2
self.soma.diam = np.sqrt(total_area) # um
self.soma.L = self.soma.diam/pi # um
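    # With diam = sqrt(total_area) and L = diam/pi, the cylinder surface area
    # pi * diam * L works out to total_area (10000 um2), as intended.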
def set_conductances(self):
self.soma.insert('pas')
self.soma.e_pas = -65 # mV
self.soma.g_pas = 0.1e-3 # S/cm2
self.soma.insert('Nafbwb')
self.soma.insert('Kdrbwb')
# def set_synapses(self):
# self.somaGABAf=Synapse(sect=self.soma,loc=0.5,tau1=0.07,tau2=9.1,e=-80);
# self.somaGABAss=Synapse(sect=self.soma,loc=0.5,tau1=20,tau2=40,e=-80);
# self.somaAMPA=Synapse(sect=self.soma,loc=0.5,tau1=0.05,tau2=5.3,e=0);
# self.somaNMDA=SynapseNMDA(sect=self.soma,loc=0.5, tau1NMDA=tau1NMDAEI,tau2NMDA=tau2NMDAEI,r=1,e=0);
| 36.74359 | 104 | 0.582694 |
88dd495963fef65b612e3fb1cb885c62a100f955 | 4,067 | py | Python | conans/server/store/disk_adapter.py | CMelanie/conan | f9bbe4a355365625202d0fdd591f2f4b0d101cf0 | [
"MIT"
] | null | null | null | conans/server/store/disk_adapter.py | CMelanie/conan | f9bbe4a355365625202d0fdd591f2f4b0d101cf0 | [
"MIT"
] | null | null | null | conans/server/store/disk_adapter.py | CMelanie/conan | f9bbe4a355365625202d0fdd591f2f4b0d101cf0 | [
"MIT"
] | null | null | null | import os
import fasteners
from conans.client.tools.env import no_op
from conans.errors import NotFoundException
from conans.util.files import decode_text, md5sum, path_exists, relative_dirs, rmdir
class ServerDiskAdapter(object):
'''Manage access to disk files with common methods required
for conan operations'''
def __init__(self, base_url, base_storage_path, updown_auth_manager):
"""
        :param base_url: Base url used to generate urls for download and upload operations"""
self.base_url = base_url
# URLs are generated removing this base path
self.updown_auth_manager = updown_auth_manager
self._store_folder = base_storage_path
# ONLY USED BY APIV1
def get_download_urls(self, paths, user=None):
'''Get the urls for download the specified files using s3 signed request.
returns a dict with this structure: {"filepath": "http://..."}
paths is a list of path files '''
assert isinstance(paths, list)
ret = {}
for filepath in paths:
url_path = os.path.relpath(filepath, self._store_folder)
url_path = url_path.replace("\\", "/")
            # MISSING: the file size for the upload URL!
signature = self.updown_auth_manager.get_token_for(url_path, user)
url = "%s/%s?signature=%s" % (self.base_url, url_path, decode_text(signature))
ret[filepath] = url
return ret
# ONLY USED BY APIV1
def get_upload_urls(self, paths_sizes, user=None):
'''Get the urls for upload the specified files using s3 signed request.
returns a dict with this structure: {"filepath": "http://..."}
paths_sizes is a dict of {path: size_in_bytes} '''
assert isinstance(paths_sizes, dict)
ret = {}
for filepath, filesize in paths_sizes.items():
url_path = os.path.relpath(filepath, self._store_folder)
url_path = url_path.replace("\\", "/")
            # MISSING: the file size for the upload URL!
signature = self.updown_auth_manager.get_token_for(url_path, user, filesize)
url = "%s/%s?signature=%s" % (self.base_url, url_path, decode_text(signature))
ret[filepath] = url
return ret
def _get_paths(self, absolute_path, files_subset):
if not path_exists(absolute_path, self._store_folder):
raise NotFoundException("")
paths = relative_dirs(absolute_path)
if files_subset is not None:
paths = set(paths).intersection(set(files_subset))
abs_paths = [os.path.join(absolute_path, relpath) for relpath in paths]
return abs_paths
def get_snapshot(self, absolute_path="", files_subset=None):
"""returns a dict with the filepaths and md5"""
abs_paths = self._get_paths(absolute_path, files_subset)
return {filepath: md5sum(filepath) for filepath in abs_paths}
def get_file_list(self, absolute_path="", files_subset=None):
abs_paths = self._get_paths(absolute_path, files_subset)
return abs_paths
def delete_folder(self, path):
'''Delete folder from disk. Path already contains base dir'''
if not path_exists(path, self._store_folder):
raise NotFoundException("")
rmdir(path)
def delete_file(self, path):
        '''Delete a file from disk. Path already contains base dir'''
if not path_exists(path, self._store_folder):
raise NotFoundException("")
os.remove(path)
def path_exists(self, path):
return os.path.exists(path)
def read_file(self, path, lock_file):
with fasteners.InterProcessLock(lock_file) if lock_file else no_op():
with open(path) as f:
return f.read()
def write_file(self, path, contents, lock_file):
with fasteners.InterProcessLock(lock_file) if lock_file else no_op():
with open(path, "w") as f:
f.write(contents)
def base_storage_folder(self):
return self._store_folder
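# Hedged usage sketch (added for illustration; not part of the original module).
# It wires the adapter to a minimal stand-in for the updown_auth_manager; in the real
# server that object signs per-user tokens. The base URL and paths below are arbitrary
# example values.
if __name__ == "__main__":
    import tempfile
    class _FakeUpDownAuthManager(object):
        """Hypothetical stub returning a constant token instead of a signed one."""
        def get_token_for(self, path, user, size=None):
            return b"demo-token"
    store = tempfile.mkdtemp()
    adapter = ServerDiskAdapter("http://localhost:9300/v1/files", store,
                                _FakeUpDownAuthManager())
    demo_file = os.path.join(store, "pkg", "0", "export", "conanfile.py")
    # Download URLs strip the store prefix and append the per-user signature.
    print(adapter.get_download_urls([demo_file], user="demo"))
    # Upload URLs additionally pass the file size to the auth manager.
    print(adapter.get_upload_urls({demo_file: 1024}, user="demo"))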
| 39.105769 | 90 | 0.653799 |
9d0cd09ecd940f113875b892fd91a4820e2de70c | 306 | py | Python | tests/models/test_CountryCodeAlpha2.py | developmentseed/unicef-schools-attribute-cleaning | eb3dd8a02f26e3455ee04ac2788e79c205ae97e5 | [
"MIT"
] | null | null | null | tests/models/test_CountryCodeAlpha2.py | developmentseed/unicef-schools-attribute-cleaning | eb3dd8a02f26e3455ee04ac2788e79c205ae97e5 | [
"MIT"
] | 3 | 2020-10-08T15:28:38.000Z | 2020-10-15T14:37:00.000Z | tests/models/test_CountryCodeAlpha2.py | developmentseed/unicef-schools-attribute-cleaning | eb3dd8a02f26e3455ee04ac2788e79c205ae97e5 | [
"MIT"
] | null | null | null | import pytest
from unicef_schools_attribute_cleaning.models.CountryCodeAlpha2 import (
CountryCodeAlpha2,
country_code_validator,
)
def test_validator():
code: CountryCodeAlpha2 = "MX"
country_code_validator(code)
with pytest.raises(ValueError):
country_code_validator("ZZ")
| 21.857143 | 72 | 0.761438 |
80bf1ef7b114a0daa808ec6933cd2740b037dae2 | 5,992 | py | Python | align/schema/checker.py | kkunal1408/ALIGN-public | 51ab942d393f19346bc179b991d42b4e35285e08 | [
"BSD-3-Clause"
] | null | null | null | align/schema/checker.py | kkunal1408/ALIGN-public | 51ab942d393f19346bc179b991d42b4e35285e08 | [
"BSD-3-Clause"
] | null | null | null | align/schema/checker.py | kkunal1408/ALIGN-public | 51ab942d393f19346bc179b991d42b4e35285e08 | [
"BSD-3-Clause"
] | null | null | null | import abc
import collections
import logging
logger = logging.getLogger(__name__)
import z3
class CheckerError(Exception):
def __init__(self, message, labels=None):
self.message = message
self.labels = labels
super().__init__(self.message)
class AbstractChecker(abc.ABC):
@abc.abstractmethod
def append(self, formula, label=None):
'''
Append formula to checker.
Note: Please use bbox variables to create formulae
Otherwise you will need to manage types
yourself
'''
pass
@abc.abstractmethod
def label(self, object):
'''
Generate label that can be used for
back-annotation
'''
return None
@abc.abstractmethod
def checkpoint(self):
'''
Checkpoint current state of solver
Note: We assume incremental solving here
May need to revisit if we have to
rebuild solution from scratch
'''
pass
@abc.abstractmethod
def revert(self):
'''
Revert to last checkpoint
Note: We assume incremental solving here
May need to revisit if we have to
rebuild solution from scratch
'''
pass
@abc.abstractmethod
def bbox_vars(self, name):
'''
Generate a single namedtuple containing
appropriate checker variables for
placement constraints
'''
pass
def iter_bbox_vars(self, names):
'''
Helper utility to generate multiple bbox variables
The output should be an iterator that allows you
to loop over bboxes (use `yield` when possible)
'''
for name in names:
yield self.bbox_vars(name)
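    # Hypothetical usage (illustration only): a placer could request one bbox per
    # instance name from a concrete checker (e.g. Z3Checker below) and constrain it:
    #   for box in checker.iter_bbox_vars(["mn0", "mn1"]):
    #       checker.append(box.llx >= 0)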
@abc.abstractmethod
def And(self, *expressions):
'''
Logical `And` of all arguments
Note: arguments are assumed to be
boolean expressions
'''
pass
@abc.abstractmethod
def Or(self, *expressions):
'''
Logical `Or` of all arguments
Note: arguments are assumed to be
boolean expressions
'''
pass
@abc.abstractmethod
def Not(self, expr):
'''
Logical `Not` of argument
Note: argument is assumed to be
a boolean expression
'''
pass
@abc.abstractmethod
def Implies(self, expr1, expr2):
'''
expr1 => expr2
Note: both arguments are assumed
to be boolean expressions
'''
pass
    @staticmethod
    @abc.abstractmethod
    def cast(expr, type_):
'''
cast `expr` to `type_`
Note: Use with care. Not all
engines support all types
'''
pass
@abc.abstractmethod
def Abs(self, expr):
'''
Absolute value of expression
Note: argument is assumed to be
arithmetic expression
'''
pass
class Z3Checker(AbstractChecker):
def __init__(self):
self._label_cache = {}
self._bbox_cache = {}
self._bbox_subcircuit = {}
self._solver = z3.Solver()
self._solver.set(unsat_core=True)
def append(self, formula, label=None):
if label is not None:
self._solver.assert_and_track(formula, label)
else:
self._solver.add(formula)
r = self._solver.check()
if r == z3.unsat:
z3.set_option(max_depth=10000, max_args=100, max_lines=10000)
logger.debug(f"Unsat encountered: {self._solver}")
raise CheckerError(
message=f'Trying to add {formula} resulted in unsat',
labels=self._solver.unsat_core())
def label(self, object):
# Z3 throws 'index out of bounds' error
# if more than 9 digits are used
return z3.Bool(
hash(repr(object)) % 10**9
)
def checkpoint(self):
self._solver.push()
def revert(self):
self._solver.pop()
def bbox_vars(self, name, is_subcircuit=False):
# bbox was previously generated
if name in self._bbox_cache:
return self._bbox_cache[name]
# generate new bbox
b = self._generate_var(
'Bbox',
llx=f'{name}_llx',
lly=f'{name}_lly',
urx=f'{name}_urx',
ury=f'{name}_ury')
# width / height cannot be 0
self.append(b.llx < b.urx)
self.append(b.lly < b.ury)
if is_subcircuit:
self._bbox_subcircuit[name] = True
else:
# Do not overlap with other instance bboxes
for k2, b2 in self._bbox_cache.items():
if k2 not in self._bbox_subcircuit:
self.append(
self.Or(
b.urx <= b2.llx,
b2.urx <= b.llx,
b.ury <= b2.lly,
b2.ury <= b.lly,
)
)
self._bbox_cache[name] = b
return b
@staticmethod
def Or(*expressions):
return z3.Or(*expressions)
@staticmethod
def And(*expressions):
return z3.And(*expressions)
@staticmethod
def Not(expr):
return z3.Not(expr)
@staticmethod
def Abs(expr):
return z3.If(expr >= 0, expr, expr * -1)
@staticmethod
def Implies(expr1, expr2):
return z3.Implies(expr1, expr2)
@staticmethod
def cast(expr, type_):
if type_ == float:
return z3.ToReal(expr)
else:
raise NotImplementedError
@staticmethod
def _generate_var(name, **fields):
if fields:
return collections.namedtuple(
name,
fields.keys(),
)(*z3.Ints(' '.join(fields.values())))
else:
return z3.Int(name)
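# Hedged usage sketch (added for illustration; not part of the original module).
# The instance names and the alignment constraint are invented; they only show how
# bbox variables and labelled constraints are meant to be combined.
if __name__ == "__main__":
    checker = Z3Checker()
    box_a = checker.bbox_vars("inst_a")
    box_b = checker.bbox_vars("inst_b")
    # The two boxes are already constrained not to overlap; additionally request a
    # shared bottom edge. An unsatisfiable mix would raise CheckerError with labels.
    checker.append(box_a.lly == box_b.lly, label=checker.label("align_bottom"))
    checker.checkpoint()
    print("placement constraints are satisfiable")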
| 24.966667 | 73 | 0.538385 |