code (string, 3–1.05M) | repo_name (string, 5–104) | path (string, 4–251) | language (1 class) | license (15 classes) | size (int64, 3–1.05M)
---|---|---|---|---|---
'''
Function to split a list into n parts.
This function can be reused in any project or script.
If you have the following list: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] and want to break it into
5 parts, you should do:
>>> new_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> print(breaker(new_list, 5))
And you should get:
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
'''
def breaker(array, parts):
    # Floor division keeps the slice bounds integral on both Python 2 and Python 3.
    return list(array[part * len(array) // parts:(part + 1) * len(array) // parts] for part in range(parts))
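# A minimal usage sketch (not part of the original script): because the slice bounds use
# floor division, a list that does not divide evenly is split into unequal chunks, with
# the remainder absorbed by the later parts.
if __name__ == "__main__":
    print(breaker([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 5))  # [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
    print(breaker([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3))  # [[1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]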
| niceandcoolusername/cosmos | code/unclassified/split_list/split_list.py | Python | gpl-3.0 | 476 |
#!/usr/bin/env python
__author__ = 'greg'
from cassandra.cluster import Cluster
import cassandra
import pymongo
import uuid
import json
from cassandra.concurrent import execute_concurrent
cluster = Cluster()
cassandra_session = cluster.connect('serengeti')
# try:
# cassandra_session.execute("drop table classifications")
# print "table dropped"
# except cassandra.InvalidRequest:
# print "table did not exist"
# pass
# cassandra_session.execute("CREATE TABLE classifications(id int, created_at timestamp,zooniverse_id text,annotations text,user_name text, user_ip inet, PRIMARY KEY(id, created_at,user_ip)) WITH CLUSTERING ORDER BY (created_at ASC, user_ip ASC);")
cassandra_session.execute("CREATE TABLE ip_classifications (id int, created_at timestamp,zooniverse_id text,annotations text,user_name text, user_ip inet, PRIMARY KEY(id, user_ip,created_at)) WITH CLUSTERING ORDER BY (user_ip ASC,created_at ASC);")
# connect to the mongo server
client = pymongo.MongoClient()
db = client['serengeti_2015-02-22']
classification_collection = db["serengeti_classifications"]
subject_collection = db["serengeti_subjects"]
user_collection = db["serengeti_users"]
insert_statement = cassandra_session.prepare("""insert into ip_classifications (id,created_at, zooniverse_id,annotations, user_name,user_ip)
values (?,?,?,?,?,?)""")
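# A hedged aside (not in the original script): a prepared statement can also be executed
# one row at a time via cassandra_session.execute(insert_statement, params). The loop
# below instead collects (statement, params) pairs and flushes them through
# execute_concurrent() every 50,000 classifications, which is far faster for a bulk load.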
statements_and_params = []
for ii,classification in enumerate(classification_collection.find()):
created_at = classification["created_at"]
if "user_name" in classification:
user_name = classification["user_name"]
else:
user_name = ""
user_ip = classification["user_ip"]
annotations = classification["annotations"]
id = uuid.uuid1()
zooniverse_id = classification["subjects"][0]["zooniverse_id"]
params = (1,created_at,zooniverse_id,json.dumps(annotations),user_name,user_ip)
statements_and_params.append((insert_statement, params))
if (ii > 0) and (ii % 50000 == 0):
print ii
r = execute_concurrent(cassandra_session, statements_and_params, raise_on_first_error=True)
statements_and_params = []
r = execute_concurrent(cassandra_session, statements_and_params, raise_on_first_error=True)
| zooniverse/aggregation | experimental/algorithms/serengeti_blank/serengeti_cass.py | Python | apache-2.0 | 2,234 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using multiple GPU's with synchronous updates.
Accuracy:
cifar10_multi_gpu_train.py achieves ~86% accuracy after 100K steps (256
epochs of data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
--------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
2 Tesla K20m | 0.13-0.20 | ~84% at 30K steps (2.5 hours)
3 Tesla K20m | 0.13-0.18 | ~84% at 30K steps
4 Tesla K20m | ~0.10 | ~84% at 30K steps
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def tower_loss(scope):
"""Calculate the total loss on a single tower running the CIFAR model.
Args:
scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build inference Graph.
logits = cifar10.inference(images)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
_ = cifar10.loss(logits, labels)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', scope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(loss_name +' (raw)', l)
tf.scalar_summary(loss_name, loss_averages.average(l))
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
return total_loss
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
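# A hedged illustration (not part of the original file) of the transposition performed by
# zip(*tower_grads) above: with 2 towers and 2 variables,
#
#   tower_grads = [[(g0_t0, v0), (g1_t0, v1)],    # gradients computed on tower 0
#                  [(g0_t1, v0), (g1_t1, v1)]]    # gradients computed on tower 1
#
# zip(*tower_grads) yields ((g0_t0, v0), (g0_t1, v0)) and then ((g1_t0, v1), (g1_t1, v1)),
# i.e. one group per variable, which is exactly what each loop iteration averages.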
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
global_step,
decay_steps,
cifar10.LEARNING_RATE_DECAY_FACTOR,
staircase=True)
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the CIFAR model. This function
# constructs the entire CIFAR model but shares the variables across
# all towers.
loss = tower_loss(scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Add a summary to track the learning rate.
summaries.append(tf.scalar_summary('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:  # compute_gradients returns None for variables unused by this tower
summaries.append(
tf.histogram_summary(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.histogram_summary(var.op.name, var))
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Create a saver.
saver = tf.train.Saver(tf.all_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.merge_summary(summaries)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
graph_def=sess.graph_def)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
| dllatas/deepLearning | uppmax/cifar10_multi_gpu_train.py | Python | mit | 10,719 |
"""Voluptuous schemas for the KNX integration."""
import voluptuous as vol
from xknx.devices.climate import SetpointShiftMode
from xknx.io import DEFAULT_MCAST_PORT
from xknx.telegram.address import GroupAddress, IndividualAddress
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_TYPE,
)
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_INVERT,
CONF_RESET_AFTER,
CONF_STATE_ADDRESS,
CONF_SYNC_STATE,
CONTROLLER_MODES,
KNX_ADDRESS,
PRESET_MODES,
ColorTempModes,
)
##################
# KNX VALIDATORS
##################
ga_validator = vol.Any(
cv.matches_regex(GroupAddress.ADDRESS_RE.pattern),
vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)),
msg="value does not match pattern for KNX group address '<main>/<middle>/<sub>', '<main>/<sub>' or '<free>' (eg.'1/2/3', '9/234', '123')",
)
ga_list_validator = vol.All(cv.ensure_list, [ga_validator])
ia_validator = vol.Any(
cv.matches_regex(IndividualAddress.ADDRESS_RE.pattern),
vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)),
msg="value does not match pattern for KNX individual address '<area>.<line>.<device>' (eg.'1.1.100')",
)
sync_state_validator = vol.Any(
vol.All(vol.Coerce(int), vol.Range(min=2, max=1440)),
cv.boolean,
cv.matches_regex(r"^(init|expire|every)( \d*)?$"),
)
sensor_type_validator = vol.Any(int, str)
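# A hedged illustration (not part of the original module) of the validators above:
#
#     ga_validator("1/2/3")   # passes - three-level group address
#     ga_validator("9/234")   # passes - two-level group address
#     ga_validator(4097)      # passes - raw integer in the range 1-65535
#
# A value matching neither form fails validation with the message defined above.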
##############
# CONNECTION
##############
class ConnectionSchema:
"""Voluptuous schema for KNX connection."""
CONF_KNX_LOCAL_IP = "local_ip"
CONF_KNX_ROUTE_BACK = "route_back"
TUNNELING_SCHEMA = vol.Schema(
{
vol.Optional(CONF_PORT, default=DEFAULT_MCAST_PORT): cv.port,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_KNX_LOCAL_IP): cv.string,
vol.Optional(CONF_KNX_ROUTE_BACK, default=False): cv.boolean,
}
)
ROUTING_SCHEMA = vol.Maybe(vol.Schema({vol.Optional(CONF_KNX_LOCAL_IP): cv.string}))
#############
# PLATFORMS
#############
class BinarySensorSchema:
"""Voluptuous schema for KNX binary sensors."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_SYNC_STATE = CONF_SYNC_STATE
CONF_INVERT = CONF_INVERT
CONF_IGNORE_INTERNAL_STATE = "ignore_internal_state"
CONF_CONTEXT_TIMEOUT = "context_timeout"
CONF_RESET_AFTER = CONF_RESET_AFTER
DEFAULT_NAME = "KNX Binary Sensor"
SCHEMA = vol.All(
cv.deprecated("significant_bit"),
cv.deprecated("automation"),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): sync_state_validator,
vol.Optional(CONF_IGNORE_INTERNAL_STATE, default=False): cv.boolean,
vol.Optional(CONF_INVERT, default=False): cv.boolean,
vol.Required(CONF_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_CONTEXT_TIMEOUT): vol.All(
vol.Coerce(float), vol.Range(min=0, max=10)
),
vol.Optional(CONF_DEVICE_CLASS): cv.string,
vol.Optional(CONF_RESET_AFTER): cv.positive_float,
}
),
)
class ClimateSchema:
"""Voluptuous schema for KNX climate devices."""
CONF_SETPOINT_SHIFT_ADDRESS = "setpoint_shift_address"
CONF_SETPOINT_SHIFT_STATE_ADDRESS = "setpoint_shift_state_address"
CONF_SETPOINT_SHIFT_MODE = "setpoint_shift_mode"
CONF_SETPOINT_SHIFT_MAX = "setpoint_shift_max"
CONF_SETPOINT_SHIFT_MIN = "setpoint_shift_min"
CONF_TEMPERATURE_ADDRESS = "temperature_address"
CONF_TEMPERATURE_STEP = "temperature_step"
CONF_TARGET_TEMPERATURE_ADDRESS = "target_temperature_address"
CONF_TARGET_TEMPERATURE_STATE_ADDRESS = "target_temperature_state_address"
CONF_OPERATION_MODE_ADDRESS = "operation_mode_address"
CONF_OPERATION_MODE_STATE_ADDRESS = "operation_mode_state_address"
CONF_CONTROLLER_STATUS_ADDRESS = "controller_status_address"
CONF_CONTROLLER_STATUS_STATE_ADDRESS = "controller_status_state_address"
CONF_CONTROLLER_MODE_ADDRESS = "controller_mode_address"
CONF_CONTROLLER_MODE_STATE_ADDRESS = "controller_mode_state_address"
CONF_HEAT_COOL_ADDRESS = "heat_cool_address"
CONF_HEAT_COOL_STATE_ADDRESS = "heat_cool_state_address"
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = (
"operation_mode_frost_protection_address"
)
CONF_OPERATION_MODE_NIGHT_ADDRESS = "operation_mode_night_address"
CONF_OPERATION_MODE_COMFORT_ADDRESS = "operation_mode_comfort_address"
CONF_OPERATION_MODE_STANDBY_ADDRESS = "operation_mode_standby_address"
CONF_OPERATION_MODES = "operation_modes"
CONF_CONTROLLER_MODES = "controller_modes"
CONF_ON_OFF_ADDRESS = "on_off_address"
CONF_ON_OFF_STATE_ADDRESS = "on_off_state_address"
CONF_ON_OFF_INVERT = "on_off_invert"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
CONF_CREATE_TEMPERATURE_SENSORS = "create_temperature_sensors"
DEFAULT_NAME = "KNX Climate"
DEFAULT_SETPOINT_SHIFT_MODE = "DPT6010"
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEFAULT_TEMPERATURE_STEP = 0.1
DEFAULT_ON_OFF_INVERT = False
SCHEMA = vol.All(
cv.deprecated("setpoint_shift_step", replacement_key=CONF_TEMPERATURE_STEP),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_SETPOINT_SHIFT_MODE, default=DEFAULT_SETPOINT_SHIFT_MODE
): vol.All(vol.Upper, cv.enum(SetpointShiftMode)),
vol.Optional(
CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX
): vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(
CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN
): vol.All(int, vol.Range(min=-32, max=0)),
vol.Optional(
CONF_TEMPERATURE_STEP, default=DEFAULT_TEMPERATURE_STEP
): vol.All(float, vol.Range(min=0, max=2)),
vol.Required(CONF_TEMPERATURE_ADDRESS): ga_list_validator,
vol.Required(CONF_TARGET_TEMPERATURE_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_TARGET_TEMPERATURE_ADDRESS): ga_list_validator,
vol.Optional(CONF_SETPOINT_SHIFT_ADDRESS): ga_list_validator,
vol.Optional(CONF_SETPOINT_SHIFT_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_OPERATION_MODE_ADDRESS): ga_list_validator,
vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): ga_list_validator,
vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): ga_list_validator,
vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_HEAT_COOL_ADDRESS): ga_list_validator,
vol.Optional(CONF_HEAT_COOL_STATE_ADDRESS): ga_list_validator,
vol.Optional(
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS
): ga_list_validator,
vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): ga_list_validator,
vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): ga_list_validator,
vol.Optional(CONF_OPERATION_MODE_STANDBY_ADDRESS): ga_list_validator,
vol.Optional(CONF_ON_OFF_ADDRESS): ga_list_validator,
vol.Optional(CONF_ON_OFF_STATE_ADDRESS): ga_list_validator,
vol.Optional(
CONF_ON_OFF_INVERT, default=DEFAULT_ON_OFF_INVERT
): cv.boolean,
vol.Optional(CONF_OPERATION_MODES): vol.All(
cv.ensure_list, [vol.In({**PRESET_MODES})]
),
vol.Optional(CONF_CONTROLLER_MODES): vol.All(
cv.ensure_list, [vol.In({**CONTROLLER_MODES})]
),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
vol.Optional(
CONF_CREATE_TEMPERATURE_SENSORS, default=False
): cv.boolean,
}
),
)
class CoverSchema:
"""Voluptuous schema for KNX covers."""
CONF_MOVE_LONG_ADDRESS = "move_long_address"
CONF_MOVE_SHORT_ADDRESS = "move_short_address"
CONF_STOP_ADDRESS = "stop_address"
CONF_POSITION_ADDRESS = "position_address"
CONF_POSITION_STATE_ADDRESS = "position_state_address"
CONF_ANGLE_ADDRESS = "angle_address"
CONF_ANGLE_STATE_ADDRESS = "angle_state_address"
CONF_TRAVELLING_TIME_DOWN = "travelling_time_down"
CONF_TRAVELLING_TIME_UP = "travelling_time_up"
CONF_INVERT_POSITION = "invert_position"
CONF_INVERT_ANGLE = "invert_angle"
DEFAULT_TRAVEL_TIME = 25
DEFAULT_NAME = "KNX Cover"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MOVE_LONG_ADDRESS): ga_list_validator,
vol.Optional(CONF_MOVE_SHORT_ADDRESS): ga_list_validator,
vol.Optional(CONF_STOP_ADDRESS): ga_list_validator,
vol.Optional(CONF_POSITION_ADDRESS): ga_list_validator,
vol.Optional(CONF_POSITION_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_ANGLE_ADDRESS): ga_list_validator,
vol.Optional(CONF_ANGLE_STATE_ADDRESS): ga_list_validator,
vol.Optional(
CONF_TRAVELLING_TIME_DOWN, default=DEFAULT_TRAVEL_TIME
): cv.positive_float,
vol.Optional(
CONF_TRAVELLING_TIME_UP, default=DEFAULT_TRAVEL_TIME
): cv.positive_float,
vol.Optional(CONF_INVERT_POSITION, default=False): cv.boolean,
vol.Optional(CONF_INVERT_ANGLE, default=False): cv.boolean,
vol.Optional(CONF_DEVICE_CLASS): cv.string,
}
)
class ExposeSchema:
"""Voluptuous schema for KNX exposures."""
CONF_KNX_EXPOSE_TYPE = CONF_TYPE
CONF_KNX_EXPOSE_ATTRIBUTE = "attribute"
CONF_KNX_EXPOSE_DEFAULT = "default"
EXPOSE_TIME_TYPES = [
"time",
"date",
"datetime",
]
EXPOSE_TIME_SCHEMA = vol.Schema(
{
vol.Required(CONF_KNX_EXPOSE_TYPE): vol.All(
cv.string, str.lower, vol.In(EXPOSE_TIME_TYPES)
),
vol.Required(KNX_ADDRESS): ga_validator,
}
)
EXPOSE_SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_KNX_EXPOSE_TYPE): sensor_type_validator,
vol.Required(KNX_ADDRESS): ga_validator,
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_KNX_EXPOSE_ATTRIBUTE): cv.string,
vol.Optional(CONF_KNX_EXPOSE_DEFAULT): cv.match_all,
}
)
SCHEMA = vol.Any(EXPOSE_TIME_SCHEMA, EXPOSE_SENSOR_SCHEMA)
class FanSchema:
"""Voluptuous schema for KNX fans."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_OSCILLATION_ADDRESS = "oscillation_address"
CONF_OSCILLATION_STATE_ADDRESS = "oscillation_state_address"
CONF_MAX_STEP = "max_step"
DEFAULT_NAME = "KNX Fan"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(KNX_ADDRESS): ga_list_validator,
vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_OSCILLATION_ADDRESS): ga_list_validator,
vol.Optional(CONF_OSCILLATION_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_MAX_STEP): cv.byte,
}
)
class LightSchema:
"""Voluptuous schema for KNX lights."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_BRIGHTNESS_ADDRESS = "brightness_address"
CONF_BRIGHTNESS_STATE_ADDRESS = "brightness_state_address"
CONF_COLOR_ADDRESS = "color_address"
CONF_COLOR_STATE_ADDRESS = "color_state_address"
CONF_COLOR_TEMP_ADDRESS = "color_temperature_address"
CONF_COLOR_TEMP_STATE_ADDRESS = "color_temperature_state_address"
CONF_COLOR_TEMP_MODE = "color_temperature_mode"
CONF_RGBW_ADDRESS = "rgbw_address"
CONF_RGBW_STATE_ADDRESS = "rgbw_state_address"
CONF_MIN_KELVIN = "min_kelvin"
CONF_MAX_KELVIN = "max_kelvin"
DEFAULT_NAME = "KNX Light"
DEFAULT_COLOR_TEMP_MODE = "absolute"
DEFAULT_MIN_KELVIN = 2700 # 370 mireds
DEFAULT_MAX_KELVIN = 6000 # 166 mireds
CONF_INDIVIDUAL_COLORS = "individual_colors"
CONF_RED = "red"
CONF_GREEN = "green"
CONF_BLUE = "blue"
CONF_WHITE = "white"
COLOR_SCHEMA = vol.Schema(
{
vol.Optional(KNX_ADDRESS): ga_list_validator,
vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
vol.Required(CONF_BRIGHTNESS_ADDRESS): ga_list_validator,
vol.Optional(CONF_BRIGHTNESS_STATE_ADDRESS): ga_list_validator,
}
)
SCHEMA = vol.All(
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(KNX_ADDRESS): ga_list_validator,
vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_BRIGHTNESS_ADDRESS): ga_list_validator,
vol.Optional(CONF_BRIGHTNESS_STATE_ADDRESS): ga_list_validator,
vol.Exclusive(CONF_INDIVIDUAL_COLORS, "color"): {
vol.Inclusive(CONF_RED, "colors"): COLOR_SCHEMA,
vol.Inclusive(CONF_GREEN, "colors"): COLOR_SCHEMA,
vol.Inclusive(CONF_BLUE, "colors"): COLOR_SCHEMA,
vol.Optional(CONF_WHITE): COLOR_SCHEMA,
},
vol.Exclusive(CONF_COLOR_ADDRESS, "color"): ga_list_validator,
vol.Optional(CONF_COLOR_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_COLOR_TEMP_ADDRESS): ga_list_validator,
vol.Optional(CONF_COLOR_TEMP_STATE_ADDRESS): ga_list_validator,
vol.Optional(
CONF_COLOR_TEMP_MODE, default=DEFAULT_COLOR_TEMP_MODE
): vol.All(vol.Upper, cv.enum(ColorTempModes)),
vol.Exclusive(CONF_RGBW_ADDRESS, "color"): ga_list_validator,
vol.Optional(CONF_RGBW_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_MIN_KELVIN, default=DEFAULT_MIN_KELVIN): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_MAX_KELVIN, default=DEFAULT_MAX_KELVIN): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
}
),
vol.Any(
# either global "address" or all addresses for individual colors are required
vol.Schema(
{
vol.Required(CONF_INDIVIDUAL_COLORS): {
vol.Required(CONF_RED): {vol.Required(KNX_ADDRESS): object},
vol.Required(CONF_GREEN): {vol.Required(KNX_ADDRESS): object},
vol.Required(CONF_BLUE): {vol.Required(KNX_ADDRESS): object},
},
},
extra=vol.ALLOW_EXTRA,
),
vol.Schema(
{
vol.Required(KNX_ADDRESS): object,
},
extra=vol.ALLOW_EXTRA,
),
),
)
class NotifySchema:
"""Voluptuous schema for KNX notifications."""
DEFAULT_NAME = "KNX Notify"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(KNX_ADDRESS): ga_validator,
}
)
class SceneSchema:
"""Voluptuous schema for KNX scenes."""
CONF_SCENE_NUMBER = "scene_number"
DEFAULT_NAME = "KNX SCENE"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(KNX_ADDRESS): ga_list_validator,
vol.Required(CONF_SCENE_NUMBER): cv.positive_int,
}
)
class SensorSchema:
"""Voluptuous schema for KNX sensors."""
CONF_ALWAYS_CALLBACK = "always_callback"
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_SYNC_STATE = CONF_SYNC_STATE
DEFAULT_NAME = "KNX Sensor"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): sync_state_validator,
vol.Optional(CONF_ALWAYS_CALLBACK, default=False): cv.boolean,
vol.Required(CONF_TYPE): sensor_type_validator,
vol.Required(CONF_STATE_ADDRESS): ga_list_validator,
}
)
class SwitchSchema:
"""Voluptuous schema for KNX switches."""
CONF_INVERT = CONF_INVERT
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
DEFAULT_NAME = "KNX Switch"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_INVERT, default=False): cv.boolean,
vol.Required(KNX_ADDRESS): ga_list_validator,
vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
}
)
class WeatherSchema:
"""Voluptuous schema for KNX weather station."""
CONF_SYNC_STATE = CONF_SYNC_STATE
CONF_KNX_TEMPERATURE_ADDRESS = "address_temperature"
CONF_KNX_BRIGHTNESS_SOUTH_ADDRESS = "address_brightness_south"
CONF_KNX_BRIGHTNESS_EAST_ADDRESS = "address_brightness_east"
CONF_KNX_BRIGHTNESS_WEST_ADDRESS = "address_brightness_west"
CONF_KNX_BRIGHTNESS_NORTH_ADDRESS = "address_brightness_north"
CONF_KNX_WIND_SPEED_ADDRESS = "address_wind_speed"
CONF_KNX_WIND_BEARING_ADDRESS = "address_wind_bearing"
CONF_KNX_RAIN_ALARM_ADDRESS = "address_rain_alarm"
CONF_KNX_FROST_ALARM_ADDRESS = "address_frost_alarm"
CONF_KNX_WIND_ALARM_ADDRESS = "address_wind_alarm"
CONF_KNX_DAY_NIGHT_ADDRESS = "address_day_night"
CONF_KNX_AIR_PRESSURE_ADDRESS = "address_air_pressure"
CONF_KNX_HUMIDITY_ADDRESS = "address_humidity"
CONF_KNX_CREATE_SENSORS = "create_sensors"
DEFAULT_NAME = "KNX Weather Station"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): sync_state_validator,
vol.Optional(CONF_KNX_CREATE_SENSORS, default=False): cv.boolean,
vol.Required(CONF_KNX_TEMPERATURE_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_BRIGHTNESS_SOUTH_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_BRIGHTNESS_EAST_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_BRIGHTNESS_WEST_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_BRIGHTNESS_NORTH_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_WIND_SPEED_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_WIND_BEARING_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_RAIN_ALARM_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_FROST_ALARM_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_WIND_ALARM_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_DAY_NIGHT_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_AIR_PRESSURE_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_HUMIDITY_ADDRESS): ga_list_validator,
}
)
| w1ll1am23/home-assistant | homeassistant/components/knx/schema.py | Python | apache-2.0 | 19,733 |
# -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
When referring to XBlocks, we use the entry-point name. For example,
| setup(
| name='xblock-foobar',
| version='0.1',
| packages=[
| 'foobar_xblock',
| ],
| entry_points={
| 'xblock.v1': [
| 'foobar-block = foobar_xblock:FoobarBlock',
| # ^^^^^^^^^^^^ This is the one you want.
| ]
| },
| )
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=unused-import
import imp
import os
import sys
import lms.envs.common
# Although this module itself may not use these imported variables, other dependent modules may.
from lms.envs.common import (
USE_TZ, TECH_SUPPORT_EMAIL, PLATFORM_NAME, BUGS_EMAIL, DOC_STORE_CONFIG, DATA_DIR, ALL_LANGUAGES, WIKI_ENABLED,
update_module_store_settings, ASSET_IGNORE_REGEX, COPYRIGHT_YEAR,
PARENTAL_CONSENT_AGE_LIMIT, COMPREHENSIVE_THEME_DIRS, REGISTRATION_EMAIL_PATTERNS_ALLOWED,
# The following PROFILE_IMAGE_* settings are included as they are
# indirectly accessed through the email opt-in API, which is
# technically accessible through the CMS via legacy URLs.
PROFILE_IMAGE_BACKEND, PROFILE_IMAGE_DEFAULT_FILENAME, PROFILE_IMAGE_DEFAULT_FILE_EXTENSION,
PROFILE_IMAGE_SECRET_KEY, PROFILE_IMAGE_MIN_BYTES, PROFILE_IMAGE_MAX_BYTES,
# The following setting is included as it is used to check whether to
# display credit eligibility table on the CMS or not.
ENABLE_CREDIT_ELIGIBILITY, YOUTUBE_API_KEY,
DEFAULT_COURSE_ABOUT_IMAGE_URL,
# Django REST framework configuration
REST_FRAMEWORK,
STATICI18N_OUTPUT_DIR,
# Theme to use when no site or site theme is defined,
DEFAULT_SITE_THEME,
# Default site to use if no site exists matching request headers
SITE_ID,
# Enable or disable theming
ENABLE_COMPREHENSIVE_THEMING,
# constants for redirects app
REDIRECT_CACHE_TIMEOUT,
REDIRECT_CACHE_KEY_PREFIX,
JWT_AUTH,
# django-debug-toolbar
DEBUG_TOOLBAR_PATCH_SETTINGS,
BLOCK_STRUCTURES_SETTINGS,
)
from path import Path as path
from warnings import simplefilter
from lms.djangoapps.lms_xblock.mixin import LmsBlockMixin
from cms.lib.xblock.authoring_mixin import AuthoringMixin
import dealer.git
from xmodule.modulestore.edit_info import EditInfoMixin
from xmodule.mixin import LicenseMixin
############################ FEATURE CONFIGURATION #############################
# Dummy secret key for dev/test
SECRET_KEY = 'dev key'
STUDIO_NAME = "Studio"
STUDIO_SHORT_NAME = "Studio"
FEATURES = {
'GITHUB_PUSH': False,
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the ones in lms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True,
'AUTH_USE_CERTIFICATES': False,
# email address for studio staff (eg to request course creation)
'STUDIO_REQUEST_EMAIL': '',
# Segment - must explicitly turn it on for production
'CMS_SEGMENT_KEY': None,
# Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Don't autoplay videos for course authors
'AUTOPLAY_VIDEOS': False,
# If set to True, new Studio users won't be able to author courses unless
# edX has explicitly added them to the course creator group.
'ENABLE_CREATOR_GROUP': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# Turn off account locking if failed login attempts exceed a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Allow editing of short description in course settings in cms
'EDITABLE_SHORT_DESCRIPTION': True,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggles the embargo functionality, which blocks users
# based on their location.
'EMBARGO': False,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Allow creating courses with non-ascii characters in the course id
'ALLOW_UNICODE_COURSE_ID': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': False,
# Turn off Advanced Security by default
'ADVANCED_SECURITY': False,
# Turn off Video Upload Pipeline through Studio, by default
'ENABLE_VIDEO_UPLOAD_PIPELINE': False,
# let students save and manage their annotations
# for consistency in user-experience, keep the value of this feature flag
# in sync with the one in lms/envs/common.py
'ENABLE_EDXNOTES': False,
# Enable support for content libraries. Note that content libraries are
# only supported in courses using split mongo.
'ENABLE_CONTENT_LIBRARIES': True,
# Milestones application flag
'MILESTONES_APP': False,
# Prerequisite courses feature flag
'ENABLE_PREREQUISITE_COURSES': False,
# Toggle course entrance exams feature
'ENTRANCE_EXAMS': False,
# Toggle platform-wide course licensing
'LICENSING': False,
# Enable the courseware search functionality
'ENABLE_COURSEWARE_INDEX': False,
# Enable content libraries search functionality
'ENABLE_LIBRARY_INDEX': False,
# Enable course reruns, which will always use the split modulestore
'ALLOW_COURSE_RERUNS': True,
# Certificates Web/HTML Views
'CERTIFICATES_HTML_VIEW': False,
# Teams feature
'ENABLE_TEAMS': True,
# Show video bumper in Studio
'ENABLE_VIDEO_BUMPER': False,
# How many seconds to show the bumper again, default is 7 days:
'SHOW_BUMPER_PERIODICITY': 7 * 24 * 3600,
# Enable credit eligibility feature
'ENABLE_CREDIT_ELIGIBILITY': ENABLE_CREDIT_ELIGIBILITY,
# Can the visibility of the discussion tab be configured on a per-course basis?
'ALLOW_HIDING_DISCUSSION_TAB': False,
# Special Exams, aka Timed and Proctored Exams
'ENABLE_SPECIAL_EXAMS': False,
'ORGANIZATIONS_APP': False,
# Show Language selector
'SHOW_LANGUAGE_SELECTOR': False,
}
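# A minimal sketch (not part of the original settings) of the pattern described in the
# module docstring: configuration that depends on FEATURES can be wrapped in a function
# and re-calculated by environment-specific settings files after they override the flags.
# The feature flag below is real; the derived value is purely illustrative.
def _illustrative_upload_limit_mb(features=FEATURES):
    """Return a hypothetical upload limit that depends on a feature flag (example only)."""
    return 100 if features.get('ENABLE_VIDEO_UPLOAD_PIPELINE') else 10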
ENABLE_JASMINE = False
############################# SOCIAL MEDIA SHARING #############################
SOCIAL_SHARING_SETTINGS = {
# Note: Ensure 'CUSTOM_COURSE_URLS' has a matching value in lms/envs/common.py
'CUSTOM_COURSE_URLS': False
}
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/cms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
LMS_ROOT = REPO_ROOT / "lms"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
GITHUB_REPO_ROOT = ENV_ROOT / "data"
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat"
############################# TEMPLATE CONFIGURATION #############################
# Mako templating
# TODO: Move the Mako templating into a different engine in TEMPLATES below.
import tempfile
MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_cms')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [
PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_js' / 'templates',
COMMON_ROOT / 'static', # required to statically include common Underscore templates
]
for namespace, template_dirs in lms.envs.common.MAKO_TEMPLATES.iteritems():
MAKO_TEMPLATES['lms.' + namespace] = template_dirs
# Django templating
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# Don't look for template source files inside installed applications.
'APP_DIRS': False,
# Instead, look for template source files in these dirs.
'DIRS': MAKO_TEMPLATES['main'],
# Options specific to this backend.
'OPTIONS': {
'loaders': (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
'context_processors': (
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.template.context_processors.csrf',
'dealer.contrib.django.staff.context_processor', # access git revision
'contentstore.context_processors.doc_url',
),
# Change 'debug' in your environment settings files - not here.
'debug': False
}
}
]
DEFAULT_TEMPLATE_ENGINE = TEMPLATES[0]
##############################################################################
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/signin'
LOGIN_URL = EDX_ROOT_URL + '/signin'
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
LMS_BASE = None
LMS_ROOT_URL = "http://localhost:8000"
# These are standard regexes for pulling out info like course_ids, usage_ids, etc.
# They are used so that URLs with deprecated-format strings still work.
from lms.envs.common import (
COURSE_KEY_PATTERN, COURSE_ID_PATTERN, USAGE_KEY_PATTERN, ASSET_KEY_PATTERN
)
######################### CSRF #########################################
# Forwards-compatibility with Django 1.7
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
# It is highly recommended that you override this in any environment accessed by
# end users
CSRF_COOKIE_SECURE = False
#################### CAPA External Code Evaluation #############################
XQUEUE_INTERFACE = {
'url': 'http://localhost:8888',
'django_auth': {'username': 'local',
'password': 'local'},
'basic_auth': None,
}
################################# Deprecation warnings #####################
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
simplefilter('ignore')
################################# Middleware ###################################
MIDDLEWARE_CLASSES = (
'crum.CurrentRequestUserMiddleware',
'request_cache.middleware.RequestCache',
'header_control.middleware.HeaderControlMiddleware',
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sites.middleware.CurrentSiteMiddleware',
# Instead of SessionMiddleware, we use a more secure version
# 'django.contrib.sessions.middleware.SessionMiddleware',
'openedx.core.djangoapps.safe_sessions.middleware.SafeSessionMiddleware',
'method_override.middleware.MethodOverrideMiddleware',
# Instead of AuthenticationMiddleware, we use a cache-backed version
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
# Enable SessionAuthenticationMiddleware in order to invalidate
# user sessions after a password change.
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
# This is used to set or update the user language preferences.
'lang_pref.middleware.LanguagePreferenceMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'codejail.django_integration.ConfigureCodeJailMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
'openedx.core.djangoapps.theming.middleware.CurrentSiteThemeMiddleware',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
# Platform for Privacy Preferences header
P3P_HEADER = 'CP="Open EdX does not have a P3P policy."'
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# These are the Mixins that should be added to every XBlock.
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (
LmsBlockMixin,
InheritanceMixin,
XModuleMixin,
EditInfoMixin,
AuthoringMixin,
)
XBLOCK_SELECT_FUNCTION = prefer_xmodules
# Paths to wrapper methods which should be applied to every XBlock's FieldData.
XBLOCK_FIELD_DATA_WRAPPERS = ()
############################ Modulestore Configuration ################################
MODULESTORE_BRANCH = 'draft-preferred'
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': {},
'stores': [
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
}
]
}
}
}
# Modulestore-level field override providers. These field override providers don't
# require student context.
MODULESTORE_FIELD_OVERRIDE_PROVIDERS = ()
#################### Python sandbox ############################################
CODE_JAIL = {
# Path to a sandboxed Python executable. None means don't bother.
'python_bin': None,
# User to run as in the sandbox.
'user': 'sandbox',
# Configurable limits.
'limits': {
# How many CPU seconds can jailed code use?
'CPU': 1,
},
}
############################ DJANGO_BUILTINS ################################
# Change DEBUG in your environment settings files, not here
DEBUG = False
SESSION_COOKIE_SECURE = False
SESSION_SAVE_EVERY_REQUEST = False
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# Site info
SITE_NAME = "localhost:8001"
HTTPS = 'on'
ROOT_URLCONF = 'cms.urls'
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = 'registration@example.com'
DEFAULT_FEEDBACK_EMAIL = 'feedback@example.com'
SERVER_EMAIL = 'devops@example.com'
ADMINS = ()
MANAGERS = ADMINS
EDX_PLATFORM_REVISION = os.environ.get('EDX_PLATFORM_REVISION')
if not EDX_PLATFORM_REVISION:
try:
# Get git revision of the current file
EDX_PLATFORM_REVISION = dealer.git.Backend(path=REPO_ROOT).revision
except TypeError:
# Not a git repository
EDX_PLATFORM_REVISION = 'unknown'
# Static content
STATIC_URL = '/static/' + EDX_PLATFORM_REVISION + "/"
STATIC_ROOT = ENV_ROOT / "staticfiles" / EDX_PLATFORM_REVISION
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
# This is how you would use the textbook images locally
# ("book", ENV_ROOT / "book_images"),
]
# Locale/Internationalization
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGES_BIDI = lms.envs.common.LANGUAGES_BIDI
LANGUAGES = lms.envs.common.LANGUAGES
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
STATICI18N_ROOT = PROJECT_ROOT / "static"
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
##### EMBARGO #####
EMBARGO_SITE_REDIRECT_URL = None
############################### PIPELINE #######################################
PIPELINE_ENABLED = True
STATICFILES_STORAGE = 'openedx.core.storage.ProductionStorage'
# List of finder classes that know how to find static files in various locations.
# Note: the pipeline finder is included to be able to discover optimized files
STATICFILES_FINDERS = [
'openedx.core.djangoapps.theming.finders.ThemeFilesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'openedx.core.lib.xblock_pipeline.finder.XBlockPipelineFinder',
'pipeline.finders.PipelineFinder',
]
# Don't use compression by default
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
from openedx.core.lib.rooted_paths import rooted_glob
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/normalize.css',
'css/vendor/font-awesome.css',
'css/vendor/html5-input-polyfills/number-polyfill.css',
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
'css/vendor/jquery.qtip.min.css',
'js/vendor/markitup/skins/simple/style.css',
'js/vendor/markitup/sets/wiki/style.css',
],
'output_filename': 'css/cms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'css/tinymce-studio-content-fonts.css',
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css',
'css/tinymce-studio-content.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-skin.css',
},
'style-main-v1': {
'source_filenames': [
'css/studio-main-v1.css',
],
'output_filename': 'css/studio-main-v1.css',
},
'style-main-v1-rtl': {
'source_filenames': [
'css/studio-main-v1-rtl.css',
],
'output_filename': 'css/studio-main-v1-rtl.css',
},
'style-main-v2': {
'source_filenames': [
'css/studio-main-v2.css',
],
'output_filename': 'css/studio-main-v2.css',
},
'style-main-v2-rtl': {
'source_filenames': [
'css/studio-main-v2-rtl.css',
],
'output_filename': 'css/studio-main-v2-rtl.css',
},
'style-xmodule-annotations': {
'source_filenames': [
'css/vendor/ova/annotator.css',
'css/vendor/ova/edx-annotator.css',
'css/vendor/ova/video-js.min.css',
'css/vendor/ova/rangeslider.css',
'css/vendor/ova/share-annotator.css',
'css/vendor/ova/richText-annotator.css',
'css/vendor/ova/tags-annotator.css',
'css/vendor/ova/flagging-annotator.css',
'css/vendor/ova/diacritic-annotator.css',
'css/vendor/ova/grouping-annotator.css',
'css/vendor/ova/ova.css',
'js/vendor/ova/catch/css/main.css'
],
'output_filename': 'css/cms-style-xmodule-annotations.css',
},
}
# test_order: Determines the position of this chunk of javascript on
# the jasmine test page
PIPELINE_JS = {
'module-js': {
'source_filenames': (
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/modules/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'common/js/discussion/*.js')
),
'output_filename': 'js/cms-modules.js',
'test_order': 1
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.coffee.CoffeeScriptCompiler',
)
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
STATICFILES_IGNORE_PATTERNS = (
"*.py",
"*.pyc",
# It would be nice if we could do, for example, "**/*.scss",
# but these strings get passed down to the `fnmatch` module,
# which doesn't support that. :(
# http://docs.python.org/2/library/fnmatch.html
"sass/*.scss",
"sass/*/*.scss",
"sass/*/*/*.scss",
"sass/*/*/*/*.scss",
"coffee/*.coffee",
"coffee/*/*.coffee",
"coffee/*/*/*.coffee",
"coffee/*/*/*/*.coffee",
# Ignore tests
"spec",
"spec_helpers",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_YUI_BINARY = 'yui-compressor'
################################# DJANGO-REQUIRE ###############################
# The baseUrl to pass to the r.js optimizer, relative to STATIC_ROOT.
REQUIRE_BASE_URL = "./"
# The name of a build profile to use for your project, relative to REQUIRE_BASE_URL.
# A sensible value would be 'app.build.js'. Leave blank to use the built-in default build profile.
# Set to False to disable running the default profile (e.g. if only using it to build Standalone
# Modules)
REQUIRE_BUILD_PROFILE = "cms/js/build.js"
# The name of the require.js script used by your project, relative to REQUIRE_BASE_URL.
REQUIRE_JS = "js/vendor/requiresjs/require.js"
# A dictionary of standalone modules to build with almond.js.
REQUIRE_STANDALONE_MODULES = {}
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = False
# A tuple of files to exclude from the compilation result of r.js.
REQUIRE_EXCLUDE = ("build.txt",)
# The execution environment in which to run r.js: auto, node or rhino.
# auto will autodetect the environment and make use of node if available and
# rhino if not.
# It can also be a path to a custom class that subclasses
# require.environments.Environment and defines some "args" function that
# returns a list with the command arguments to execute.
REQUIRE_ENVIRONMENT = "node"
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############################## Video ##########################################
YOUTUBE = {
# YouTube JavaScript API
'API': 'https://www.youtube.com/iframe_api',
# URL to get YouTube metadata
'METADATA_URL': 'https://www.googleapis.com/youtube/v3/videos',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
'IMAGE_API': 'http://img.youtube.com/vi/{youtube_id}/0.jpg', # /maxresdefault.jpg for 1920*1080
}
############################# VIDEO UPLOAD PIPELINE #############################
VIDEO_UPLOAD_PIPELINE = {
'BUCKET': '',
'ROOT_PATH': '',
'CONCURRENT_UPLOAD_LIMIT': 4,
}
############################ APPS #####################################
INSTALLED_APPS = (
# Standard apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.redirects',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'djcelery',
'method_override',
# Common views
'openedx.core.djangoapps.common_views',
# History tables
'simple_history',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# Testing
'django_nose',
# For CMS
'contentstore',
'contentserver',
'course_creators',
'external_auth',
'student', # misleading name due to sharing with lms
'openedx.core.djangoapps.course_groups', # not used in cms (yet), but tests run
'openedx.core.djangoapps.coursetalk', # not used in cms (yet), but tests run
'xblock_config',
# Maintenance tools
'maintenance',
# Tracking
'track',
'eventtracking.django.apps.EventTrackingConfig',
# Monitoring
'datadog',
# For asset pipelining
'edxmako',
'pipeline',
'static_replace',
'require',
# Theming
'openedx.core.djangoapps.theming',
# Site configuration for theming and behavioral modification
'openedx.core.djangoapps.site_configuration',
# comment common
'django_comment_common',
# for course creator table
'django.contrib.admin',
# for managing course modes
'course_modes',
# Dark-launching languages
'dark_lang',
# Student identity reverification
'reverification',
# User preferences
'openedx.core.djangoapps.user_api',
'django_openid_auth',
'embargo',
# Monitoring signals
'monitoring',
# Course action state
'course_action_state',
# Additional problem types
'edx_jsme', # Molecular Structure
'openedx.core.djangoapps.content.course_overviews',
'openedx.core.djangoapps.content.course_structures.apps.CourseStructuresConfig',
'openedx.core.djangoapps.content.block_structure.apps.BlockStructureConfig',
# Credit courses
'openedx.core.djangoapps.credit',
'xblock_django',
# edX Proctoring
'edx_proctoring',
# Bookmarks
'openedx.core.djangoapps.bookmarks',
# programs support
'openedx.core.djangoapps.programs',
# Catalog integration
'openedx.core.djangoapps.catalog',
# Self-paced course configuration
'openedx.core.djangoapps.self_paced',
# django-oauth2-provider (deprecated)
'provider',
'provider.oauth2',
'edx_oauth2_provider',
# django-oauth-toolkit
'oauth2_provider',
# These are apps that aren't strictly needed by Studio, but are imported by
# other apps that are. Django 1.8 wants to have imported models supported
# by installed apps.
'lms.djangoapps.verify_student',
'lms.djangoapps.grades',
# Microsite configuration application
'microsite_configuration',
# edx-milestones service
'milestones',
# Static i18n support
'statici18n',
# Tagging
'cms.lib.xblock.tagging',
# Enables default site and redirects
'django_sites_extensions',
# additional release utilities to ease automation
'release_util'
)
################# EDX MARKETING SITE ##################################
EDXMKTG_LOGGED_IN_COOKIE_NAME = 'edxloggedin'
EDXMKTG_USER_INFO_COOKIE_NAME = 'edx-user-info'
EDXMKTG_USER_INFO_COOKIE_VERSION = 1
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
}
COURSES_WITH_UNSAFE_CODE = []
############################## EVENT TRACKING #################################
TRACK_MAX_EVENT = 50000
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
EVENT_TRACKING_ENABLED = True
EVENT_TRACKING_BACKENDS = {
'tracking_logs': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'logger': {
'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking',
'max_event_size': TRACK_MAX_EVENT,
}
}
},
'processors': [
{'ENGINE': 'track.shim.LegacyFieldMappingProcessor'},
{'ENGINE': 'track.shim.PrefixedEventProcessor'}
]
}
},
'segmentio': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'segment': {'ENGINE': 'eventtracking.backends.segment.SegmentBackend'}
},
'processors': [
{
'ENGINE': 'eventtracking.processors.whitelist.NameWhitelistProcessor',
'OPTIONS': {
'whitelist': []
}
},
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
}
}
}
EVENT_TRACKING_PROCESSORS = []
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
### Apps only installed in some instances
OPTIONAL_APPS = (
'mentoring',
'problem_builder',
'edx_sga',
# edx-ora2
'submissions',
'openassessment',
'openassessment.assessment',
'openassessment.fileupload',
'openassessment.workflow',
'openassessment.xblock',
# edxval
'edxval',
# Organizations App (http://github.com/edx/edx-organizations)
'organizations',
)
for app_name in OPTIONAL_APPS:
# First attempt to only find the module rather than actually importing it,
    # to avoid circular references; only try the import if it can't be found
# by find_module, which doesn't work with import hooks
try:
imp.find_module(app_name)
except ImportError:
try:
__import__(app_name)
except ImportError:
continue
INSTALLED_APPS += (app_name,)
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
### External auth usage -- prefixes for ENROLLMENT_DOMAIN
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'
### Size of chunks into which asset uploads will be divided
UPLOAD_CHUNK_SIZE_IN_MB = 10
### Max size of asset uploads to GridFS
MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB = 10
# FAQ url to direct users to if they upload
# a file that exceeds the above size
MAX_ASSET_UPLOAD_FILE_SIZE_URL = ""
### Default value for entrance exam minimum score
ENTRANCE_EXAM_MIN_SCORE_PCT = 50
### Default language for a new course
DEFAULT_COURSE_LANGUAGE = "en"
# Specify XBlocks that should be treated as advanced problems. Each entry is a
# dict:
# 'component': the entry-point name of the XBlock.
# 'boilerplate_name': an optional YAML template to be used. Specify as
# None to omit.
#
ADVANCED_PROBLEM_TYPES = [
{
'component': 'openassessment',
'boilerplate_name': None,
},
]
# Files and Uploads type filter values
FILES_AND_UPLOAD_TYPE_FILTERS = {
"Images": ['image/png', 'image/jpeg', 'image/jpg', 'image/gif', 'image/tiff', 'image/tif', 'image/x-icon'],
"Documents": [
'application/pdf',
'text/plain',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'application/vnd.openxmlformats-officedocument.wordprocessingml.template',
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'application/vnd.openxmlformats-officedocument.presentationml.slideshow',
'application/vnd.openxmlformats-officedocument.presentationml.template',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'application/vnd.openxmlformats-officedocument.spreadsheetml.template',
'application/msword',
'application/vnd.ms-excel',
'application/vnd.ms-powerpoint',
],
}
# Default to no Search Engine
SEARCH_ENGINE = None
ELASTIC_FIELD_MAPPINGS = {
"start_date": {
"type": "date"
}
}
XBLOCK_SETTINGS = {
"VideoDescriptor": {
"licensing_enabled": FEATURES.get("LICENSING", False)
},
'VideoModule': {
'YOUTUBE_API_KEY': YOUTUBE_API_KEY
}
}
################################ Settings for Credit Course Requirements ################################
# Initial delay used for retrying tasks.
# Additional retries use longer delays.
# Value is in seconds.
CREDIT_TASK_DEFAULT_RETRY_DELAY = 30
# Maximum number of retries per task for errors that are not related
# to throttling.
CREDIT_TASK_MAX_RETRIES = 5
# Maximum age in seconds of timestamps we will accept
# when a credit provider notifies us that a student has been approved
# or denied for credit.
CREDIT_PROVIDER_TIMESTAMP_EXPIRATION = 15 * 60
################################ Settings for Microsites ################################
### Select an implementation for the microsite backend
# for MICROSITE_BACKEND possible choices are
# 1. microsite_configuration.backends.filebased.FilebasedMicrositeBackend
# 2. microsite_configuration.backends.database.DatabaseMicrositeBackend
MICROSITE_BACKEND = 'microsite_configuration.backends.filebased.FilebasedMicrositeBackend'
# for MICROSITE_TEMPLATE_BACKEND possible choices are
# 1. microsite_configuration.backends.filebased.FilebasedMicrositeTemplateBackend
# 2. microsite_configuration.backends.database.DatabaseMicrositeTemplateBackend
MICROSITE_TEMPLATE_BACKEND = 'microsite_configuration.backends.filebased.FilebasedMicrositeTemplateBackend'
# TTL for microsite database template cache
MICROSITE_DATABASE_TEMPLATE_CACHE_TTL = 5 * 60
############################### PROCTORING CONFIGURATION DEFAULTS ##############
PROCTORING_BACKEND_PROVIDER = {
'class': 'edx_proctoring.backends.null.NullBackendProvider',
'options': {},
}
PROCTORING_SETTINGS = {}
############################ Global Database Configuration #####################
DATABASE_ROUTERS = [
'openedx.core.lib.django_courseware_routers.StudentModuleHistoryExtendedRouter',
]
############################ OAUTH2 Provider ###################################
# OpenID Connect issuer ID. Normally the URL of the authentication endpoint.
OAUTH_OIDC_ISSUER = 'https://www.example.com/oauth2'
# 5 minute expiration time for JWT id tokens issued for external API requests.
OAUTH_ID_TOKEN_EXPIRATION = 5 * 60
USERNAME_PATTERN = r'(?P<username>[\w.@+-]+)'
# Partner support link for CMS footer
PARTNER_SUPPORT_EMAIL = ''
# Affiliate cookie tracking
AFFILIATE_COOKIE_NAME = 'affiliate_id'
############## Settings for Studio Context Sensitive Help ##############
DOC_LINK_BASE_URL = None
| louyihua/edx-platform | cms/envs/common.py | Python | agpl-3.0 | 37,478 |
from distutils.core import setup
setup(name='Critical Py',
version='0.1',
      description='Critical path calculation',
author='David Henderson',
author_email='david.henderson82@gmail.com',
url='https://github.com/dhenderson/criticalpy',
packages=['criticalpy']
) | dhenderson/criticalpy | setup.py | Python | mit | 299 |
"""
Test the script webservice-image-digest by calling this from
the tests directory
cd compose_setup/scripts/tests
python test-webservice-image-digest.py
"""
import unittest
import subprocess
script_location = "../webservice-image-digest.py"
base_command = "python {}".format(script_location)
branch = "develop"
simple_tag = "digest_test"
annotated_tag = "1.12.0-beta.1"
class TestDigest(unittest.TestCase):
def test_branch(self):
cmd = "{} {}".format(base_command, branch)
ret = subprocess.check_output(cmd, shell=True, universal_newlines=True).rstrip()
self.assertEqual(ret, "sha256:52cf6b09e89a238bfd1d98dd01139442d67fcaaa377c179f315dd06555f7bcae")
pass
def test_simple_tag(self):
cmd = "{} {}".format(base_command, simple_tag)
ret = subprocess.check_output(cmd, shell=True, universal_newlines=True).rstrip()
self.assertEqual(ret, "sha256:f21d00e9f01d54eb891c128fb88b76554cb0b47c775929dc05e39a03954e7b0b")
pass
def test_annotated_tag(self):
cmd = "{} {}".format(base_command, annotated_tag)
ret = subprocess.check_output(cmd, shell=True, universal_newlines=True).rstrip()
self.assertEqual(ret, "sha256:e6dcfdc9ea351b57cde556ff3c68f96b838e8e30cdb4ee693a29b6ef16f3a4be")
pass
if __name__ == '__main__':
unittest.main()
| dockstore/compose_setup | scripts/tests/test-webservice-image-digest.py | Python | apache-2.0 | 1,346 |
""" $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
from nn_dataflow.core import BufShrScheme
from nn_dataflow.core import DataCategoryEnum as de
from nn_dataflow.core import loop_blocking
from nn_dataflow.core import LoopBlockingScheme
from nn_dataflow.core import LoopEnum as le
from nn_dataflow.core import ParallelEnum as pe
from nn_dataflow.core import PartitionScheme
from nn_dataflow import util
from . import TestLoopBlockingFixture
class TestLoopBlockingPartition(TestLoopBlockingFixture):
''' Tests for LoopBlocking module with partitioning. '''
def setUp(self):
super(TestLoopBlockingPartition, self).setUp()
# LoopBlockingScheme records stats of all nodes.
self.total_ops = self.layer['PAR'].total_ops(self.batch_size)
self.par_proc_region = self.resource['PAR'].proc_region
def test_accfwd(self):
''' Scheme using accfwd. '''
for part in self._gen_all_partition():
p_nld = self._part_nld(part)
filter_size, ifmap_size, ofmap_size = self._total_part_size(part)
bufshr = BufShrScheme(self.par_proc_region, part)
# Filter may still have redundant fetch.
fil_fetch = part.size(pe.BATP, pe.OFMP) // bufshr.size(de.FIL)
for lbs in loop_blocking.gen_loopblocking(
p_nld, self.resource['PAR'], part, self.none_cstr,
self.cost, self.options['ACCFWD']):
if not lbs.is_valid():
continue
# Ops.
self.assertAlmostEqual(lbs.ops, self.total_ops)
# Access forwarding reduction.
accfwd_red = lbs.accfwd_reduction
self.assertEqual(accfwd_red[de.FIL],
part.size(pe.BATP, pe.OFMP) // fil_fetch)
self.assertEqual(accfwd_red[de.OFM], part.size(pe.INPP))
self.assertEqual(accfwd_red[de.IFM], part.size(pe.OUTP))
# Top fetch and access.
top_fetch = lbs.fetch[0]
top_access = lbs.access[0]
self.assertAlmostEqual(top_access[de.FIL],
top_fetch[de.FIL] * filter_size
* fil_fetch)
self.assertAlmostEqual(top_access[de.OFM],
top_fetch[de.OFM] * ofmap_size)
self.assertGreaterEqual(top_access[de.IFM],
top_fetch[de.IFM] * ifmap_size)
def test_bufshr(self):
''' Scheme using bufshr. '''
for part in self._gen_all_partition():
p_nld = self._part_nld(part)
bufshr = BufShrScheme(self.par_proc_region, part)
# Filter may still have redundant fetch.
fil_fetch = part.size(pe.BATP, pe.OFMP) // bufshr.size(de.FIL)
for optkey in ['BUFSHR', 'BUFSHR-BYP']:
for lbs in loop_blocking.gen_loopblocking(
p_nld, self.resource['PAR'], part, self.none_cstr,
self.cost, self.options[optkey]):
if not lbs.is_valid():
continue
# Ops.
self.assertAlmostEqual(lbs.ops, self.total_ops)
# Buffer sharing uses access forwarding reduction.
accfwd_red = lbs.accfwd_reduction
self.assertEqual(accfwd_red[de.FIL],
part.size(pe.BATP, pe.OFMP) // fil_fetch)
self.assertEqual(accfwd_red[de.OFM], part.size(pe.INPP))
self.assertEqual(accfwd_red[de.IFM], part.size(pe.OUTP))
# Buffer sharing group size.
bufshr_grp_size = lbs.bufshr_grp_size
self.assertSequenceEqual(bufshr_grp_size, accfwd_red)
# Buffer sharing subgroup size.
bufshr_subgrp_size = lbs.bufshr_subgrp_size
self.assertTrue(all(subgrp <= grp for subgrp, grp
in zip(bufshr_subgrp_size,
bufshr_grp_size)))
def test_bufshr_access(self):
''' Access of scheme using bufshr. '''
for part in self._gen_all_partition():
p_nld = self._part_nld(part)
bufshr = BufShrScheme(self.par_proc_region, part)
for lbs in loop_blocking.gen_loopblocking(
p_nld, self.resource['PAR'], part, self.none_cstr,
self.cost, self.options['BUFSHR']):
if not lbs.is_valid():
continue
# Skip those without bufshr.
if all(sgs <= 1 for sgs in lbs.bufshr_subgrp_size):
continue
# Sim.
dram_access, gbuf_access, bufshr_stats = \
self._sim_access_conv(lbs, get_bufshr=True)
self._verify_bufshr_stats(dram_access, gbuf_access,
bufshr_stats, lbs, bufshr,
'test_bufshr_access')
def test_bufshr_access_byp(self):
''' Access of scheme using bufshr with bypassing. '''
for part in self._gen_all_partition():
p_nld = self._part_nld(part)
bufshr = BufShrScheme(self.par_proc_region, part)
for lbs in loop_blocking.gen_loopblocking(
p_nld, self.resource['PAR'], part, self.none_cstr,
self.cost, self.options['BUFSHR-BYP']):
if not lbs.is_valid():
continue
# Skip those without bufshr.
if all(sgs <= 1 for sgs in lbs.bufshr_subgrp_size):
continue
# Skip those without bypassing.
if all(lbs.stored_in_gbuf):
continue
# Sim.
dram_access, gbuf_access, bufshr_stats = \
self._sim_access_conv(lbs, get_bufshr=True)
self._verify_bufshr_stats(dram_access, gbuf_access,
bufshr_stats, lbs, bufshr,
'test_bufshr_access')
def test_bufshr_rotation_example(self):
''' Example scheme using bufshr with rotation. '''
# Make a PartitionScheme that allows bufshr for all data categories.
part = PartitionScheme(order=range(pe.NUM),
pdims=((2, 1), (1, 2), (1, 1), (2, 1)))
bufshr = BufShrScheme(self.par_proc_region, part)
self.assertTrue(all(bufshr.size(dce) > 1 for dce in range(de.NUM)),
'test_bufshr_rotation_example: '
'made-up PartitionScheme is not expected: '
'{}, bufshr size {}'
.format(part,
[bufshr.size(dce) for dce in range(de.NUM)]))
# Make a LoopBlockingScheme that uses bufshr for all data categories.
p_nld = self._part_nld(part)
bl_ts = ((util.idivc(p_nld.loopcnt[le.IFM], 6),
util.idivc(p_nld.loopcnt[le.OFM], 9),
util.idivc(p_nld.loopcnt[le.BAT], 2)),
(3, 3, 2),
(2, 3, 1))
bl_ords = (tuple(range(le.NUM)), tuple(range(le.NUM)))
lbs = LoopBlockingScheme(p_nld, bl_ts, bl_ords, self.resource['PAR'],
bufshr, self.options['BUFSHR'])
self.assertTrue(lbs.is_valid())
self.assertGreater(sum(lbs.get_noc_access()), 0)
self.assertTrue(all(sgs > 1 for sgs in lbs.bufshr_subgrp_size)
and all(t > 1 for t in bl_ts[0]),
'test_bufshr_rotation_example: '
'made-up LoopBlockingScheme is not expected: '
'{}, top factors {}, bufshr subgrp size {}'
.format((bl_ts, bl_ords), bl_ts[0],
lbs.bufshr_subgrp_size))
# Sim.
dram_access, gbuf_access, bufshr_stats = \
self._sim_access_conv(lbs, get_bufshr=True)
self._verify_bufshr_stats(dram_access, gbuf_access, bufshr_stats,
lbs, bufshr, 'test_bufshr_rotation_example')
def test_bufshr_skip_rot_example(self):
''' Example scheme using bufshr that skips the single rotation. '''
# Make a PartitionScheme that allows bufshr for IFM.
part = PartitionScheme(order=range(pe.NUM),
pdims=((2, 2), (1, 1), (2, 1), (1, 1)))
bufshr = BufShrScheme(self.par_proc_region, part)
self.assertEqual(bufshr.size(de.IFM), 4,
'test_bufshr_skip_rot_example: '
'made-up PartitionScheme is not expected: '
'{}, bufshr size for {} {}.'
.format(part, de.IFM, bufshr.size(de.IFM)))
# Make a LoopBlockingScheme that has a single rotation for IFM.
p_nld = self._part_nld(part)
bl_ts = ((util.idivc(p_nld.loopcnt[le.IFM], 3),
util.idivc(p_nld.loopcnt[le.OFM], 3),
util.idivc(p_nld.loopcnt[le.BAT], 2)),
(1, 1, 2),
(3, 3, 1))
bl_ords = (tuple(range(le.NUM)), tuple(range(le.NUM)))
lbs = LoopBlockingScheme(p_nld, bl_ts, bl_ords, self.resource['PAR'],
bufshr, self.options['BUFSHR'])
self.assertTrue(lbs.is_valid())
self.assertGreater(sum(lbs.get_noc_access()), 0)
self.assertEqual(lbs.bufshr_subgrp_size[de.IFM], 4,
'test_bufshr_skip_rot_example: '
'made-up LoopBlockingScheme is not expected: '
'{}, bufshr subgrp size for {} {}.'
.format((bl_ts, bl_ords), de.IFM,
lbs.bufshr_subgrp_size[de.IFM]))
self.assertGreater(lbs.bufshr_wide_fetch_width[de.IFM], 1,
'test_bufshr_skip_rot_example: '
'made-up LoopBlockingScheme is not expected: '
'{}, bufshr wide fetch width for {} {}.'
.format((bl_ts, bl_ords), de.IFM,
lbs.bufshr_wide_fetch_width[de.IFM]))
self.assertEqual(lbs.bufshr_rot_round_cnt[de.IFM], 0,
'test_bufshr_skip_rot_example: '
'made-up LoopBlockingScheme is not expected: '
'{}, bufshr rotation rounds for {} {}'
.format((bl_ts, bl_ords), de.IFM,
lbs.bufshr_rot_round_cnt[de.IFM]))
# Sim.
dram_access, gbuf_access, bufshr_stats = \
self._sim_access_conv(lbs, get_bufshr=True)
self._verify_bufshr_stats(dram_access, gbuf_access, bufshr_stats,
lbs, bufshr,
'test_bufshr_skip_rot_example')
def test_bufshr_wide_fetch_example(self):
''' Example scheme using bufshr with wide fetch. '''
# Make a PartitionScheme that allows bufshr for IFM.
part = PartitionScheme(order=range(pe.NUM),
pdims=((2, 2), (1, 1), (2, 1), (1, 1)))
bufshr = BufShrScheme(self.par_proc_region, part)
self.assertEqual(bufshr.size(de.IFM), 4,
'test_bufshr_wide_fetch_example: '
'made-up PartitionScheme is not expected: '
'{}, bufshr size for {} {}.'
.format(part, de.IFM, bufshr.size(de.IFM)))
for t1, t2 in [((3, 3, 1), (1, 1, 2)),
((1, 3, 2), (3, 1, 1))]:
# Make a LoopBlockingScheme that has wide fetch for IFM.
p_nld = self._part_nld(part)
bl_ts = (tuple(util.idivc(p_nld.loopcnt[lpe], t1[lpe] * t2[lpe])
for lpe in range(le.NUM)),
t1, t2)
# At GBUF level, from inner to outer: le.BAT, le.IFM, le.OFM.
bl_ords = (tuple(range(le.NUM)), (1, 2, 0))
lbs = LoopBlockingScheme(p_nld, bl_ts, bl_ords,
self.resource['PAR'], bufshr,
self.options['BUFSHR'])
self.assertTrue(lbs.is_valid())
self.assertGreater(sum(lbs.get_noc_access()), 0)
self.assertEqual(lbs.bufshr_subgrp_size[de.IFM], 4,
'test_bufshr_wide_fetch_example: '
'made-up LoopBlockingScheme is not expected: '
'{}, bufshr subgrp size for {} {}.'
.format((bl_ts, bl_ords), de.IFM,
lbs.bufshr_subgrp_size[de.IFM]))
self.assertGreater(lbs.bufshr_wide_fetch_width[de.IFM], 1,
'test_bufshr_wide_fetch_example: '
'made-up LoopBlockingScheme is not expected: '
'{}, bufshr wide fetch width for {} {}.'
.format((bl_ts, bl_ords), de.IFM,
lbs.bufshr_wide_fetch_width[de.IFM]))
self.assertGreater(lbs.bufshr_rot_round_cnt[de.IFM], 0,
'test_bufshr_wide_fetch_example: '
'made-up LoopBlockingScheme is not expected: '
'{}, bufshr rotation rounds for {} {}'
.format((bl_ts, bl_ords), de.IFM,
lbs.bufshr_rot_round_cnt[de.IFM]))
# Sim.
dram_access, gbuf_access, bufshr_stats = \
self._sim_access_conv(lbs, get_bufshr=True)
self._verify_bufshr_stats(dram_access, gbuf_access, bufshr_stats,
lbs, bufshr,
'test_bufshr_wide_fetch_example')
def test_bufshr_multisubgrp_example(self):
''' Example scheme using bufshr with multiple subgroups in a group. '''
# Make a PartitionScheme that allows bufshr for IFM.
part = PartitionScheme(order=list(reversed(range(pe.NUM))),
pdims=((2, 2), (1, 1), (2, 1), (1, 1)))
bufshr = BufShrScheme(self.par_proc_region, part)
self.assertEqual(bufshr.size(de.IFM), 4,
'test_bufshr_multisubgrp_example: '
'made-up PartitionScheme is not expected: '
'{}, bufshr size for {} {}.'
.format(part, de.IFM, bufshr.size(de.IFM)))
# Make a LoopBlockingScheme that has multi subgroups per group for IFM.
p_nld = self._part_nld(part)
bl_ts = ((util.idivc(p_nld.loopcnt[le.IFM], 1),
util.idivc(p_nld.loopcnt[le.OFM], 3),
util.idivc(p_nld.loopcnt[le.BAT], 2)),
(1, 3, 2),
(1, 1, 1))
# At GBUF level, from inner to outer: le.BAT, le.OFM, le.IFM.
bl_ords = (tuple(range(le.NUM)), (2, 1, 0))
lbs = LoopBlockingScheme(p_nld, bl_ts, bl_ords, self.resource['PAR'],
bufshr, self.options['BUFSHR'])
self.assertTrue(lbs.is_valid())
self.assertGreater(sum(lbs.get_noc_access()), 0)
self.assertGreater(lbs.bufshr_grp_size[de.IFM],
lbs.bufshr_subgrp_size[de.IFM],
'test_bufshr_multisubgrp_example: '
'made-up LoopBlockingScheme is not expected: '
'{}, bufshr grp size {}, bufshr subgrp size {}'
.format((bl_ts, bl_ords), lbs.bufshr_grp_size,
lbs.bufshr_subgrp_size))
self.assertGreater(lbs.bufshr_rot_round_cnt[de.IFM], 0,
'test_bufshr_multisubgrp_example: '
'made-up LoopBlockingScheme is not expected: '
'{}, bufshr rotation rounds for {} {}'
.format((bl_ts, bl_ords), de.IFM,
lbs.bufshr_rot_round_cnt[de.IFM]))
# Sim.
dram_access, gbuf_access, bufshr_stats = \
self._sim_access_conv(lbs, get_bufshr=True)
self._verify_bufshr_stats(dram_access, gbuf_access, bufshr_stats,
lbs, bufshr,
'test_bufshr_multisubgrp_example')
def test_bufshr_get_noc_access(self):
''' get_noc_access of scheme using bufshr. '''
for part in self._gen_all_partition():
p_nld = self._part_nld(part)
for lbs in loop_blocking.gen_loopblocking(
p_nld, self.resource['PAR'], part, self.none_cstr,
self.cost, self.options['BUFSHR']):
noc_access = lbs.get_noc_access()
if not lbs.is_valid():
self.assertIsNone(noc_access)
else:
for dce in range(de.NUM):
self.assertAlmostEqual(
lbs.bufshr_rotation_access[dce]
+ lbs.bufshr_wide_fetch_access[dce],
noc_access[dce])
def test_bufshr_localregionlayer(self):
''' Scheme using bufshr for LocalRegionLayer. '''
for part in self._gen_all_partition(layerkey='POOL'):
p_nld = self._part_nld(part, layerkey='POOL')
for lbs in loop_blocking.gen_loopblocking(
p_nld, self.resource['PAR'], part, self.none_cstr,
self.cost, self.options['BUFSHR']):
if not lbs.is_valid():
continue
self.assertTrue(all(gs == 1 for gs in lbs.bufshr_grp_size),
'test_bufshr_localregionlayer: '
'non-1 bufshr group size {}, part {}'
.format(lbs.bufshr_grp_size, part))
| stanford-mast/nn_dataflow | nn_dataflow/tests/loop_blocking_test/test_loop_blocking_partition.py | Python | bsd-3-clause | 18,952 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A simple Python file."""
CHARLIE = "Brown"
VIOLET = "Gray"
PATRICIA = "Reichardt"
LINUS = "van Pelt"
| neal-rogers/is210-week-02-synthesizing | task_03.py | Python | mpl-2.0 | 151 |
# -*- coding: utf-8 -*-
import os
import pytest
from gridsync.errors import FilesystemLockError
from gridsync.lock import FilesystemLock
def test_lock_acquire(tmpdir):
lock = FilesystemLock(os.path.join(str(tmpdir), "test.lock"))
lock.acquire()
assert lock.fd
def test_lock_acquire_filepath_created(tmpdir):
lock = FilesystemLock(os.path.join(str(tmpdir), "test.lock"))
lock.acquire()
assert os.path.isfile(lock.filepath)
def test_lock_acquire_raise_filesystemlockerror_on_second_call(tmpdir):
lock = FilesystemLock(os.path.join(str(tmpdir), "test.lock"))
lock.acquire()
with pytest.raises(FilesystemLockError):
lock.acquire()
def test_lock_acquire_raise_filesystemlockerror_from_second_instance(tmpdir):
lock_1 = FilesystemLock(os.path.join(str(tmpdir), "test.lock"))
lock_1.acquire()
lock_2 = FilesystemLock(os.path.join(str(tmpdir), "test.lock"))
with pytest.raises(FilesystemLockError):
lock_2.acquire()
def test_lock_release(tmpdir):
lock = FilesystemLock(os.path.join(str(tmpdir), "test.lock"))
lock.acquire()
lock.release()
lock.acquire()
lock.release()
| gridsync/gridsync | tests/test_lock.py | Python | gpl-3.0 | 1,162 |
# setup.py - distutils configuration for esm and esmre modules
# Copyright (C) 2007 Tideway Systems Limited.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from setuptools import setup, Extension
module1 = Extension("esm",
#define_macros=[("HEAP_CHECK", 1)],
sources = ['src/esm.c',
'src/aho_corasick.c',
'src/ac_heap.c',
'src/ac_list.c'])
setup (name = "esmre",
version = '0.3.2',
description = 'Regular expression accelerator',
long_description = " ".join("""
Modules used to accelerate execution of a large collection of regular
expressions using the Aho-Corasick algorithms.
""".strip().split()),
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: '
'GNU Library or Lesser General Public License (LGPL)',
'Operating System :: POSIX',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Indexing'
],
install_requires=['setuptools'],
author = 'Will Harris, Matteo Angelino',
author_email = 'esmre@greatlibrary.net, matteo.angelino@gmail.com',
url = 'http://code.google.com/p/esmre/',
license = 'GNU LGPL',
platforms = ['POSIX'],
ext_modules = [module1],
package_dir = {'': 'src'},
py_modules = ["esmre"])
| mangelin/esmre | setup.py | Python | lgpl-2.1 | 2,313 |
import hashlib
import hmac
import re
import time
from urllib import urlencode
from django.conf import settings
from django.contrib.auth.middleware import (AuthenticationMiddleware as
BaseAuthenticationMiddleware)
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.middleware.gzip import GZipMiddleware as BaseGZipMiddleware
from django.utils.cache import patch_vary_headers
import commonware.log
from django_statsd.clients import statsd
from django_statsd.middleware import (GraphiteRequestTimingMiddleware,
TastyPieRequestTimingMiddleware)
from multidb.pinning import (pin_this_thread, this_thread_is_pinned,
unpin_this_thread)
from multidb.middleware import PinningRouterMiddleware
from oauthlib.common import Request
from oauthlib.oauth1.rfc5849 import signature
from mkt.api.models import Access, ACCESS_TOKEN, Token
from mkt.api.oauth import server, validator
from mkt.carriers import get_carrier
from mkt.users.models import UserProfile
log = commonware.log.getLogger('z.api')
class RestOAuthMiddleware(object):
"""
This is based on https://github.com/amrox/django-tastypie-two-legged-oauth
with permission.
"""
def process_request(self, request):
# For now we only want these to apply to the API.
# This attribute is set in APIBaseMiddleware.
if not getattr(request, 'API', False):
return
if not settings.SITE_URL:
raise ValueError('SITE_URL is not specified')
# Set up authed_from attribute.
if not hasattr(request, 'authed_from'):
request.authed_from = []
auth_header_value = request.META.get('HTTP_AUTHORIZATION')
# If there is a mkt-shared-secret in the auth header, ignore it.
if (auth_header_value and
auth_header_value.split(None, 1)[0] == 'mkt-shared-secret'):
log.info('mkt-shared-secret found, ignoring.')
return
if (not auth_header_value and
'oauth_token' not in request.META['QUERY_STRING']):
self.user = AnonymousUser()
log.info('No HTTP_AUTHORIZATION header')
return
# Set up authed_from attribute.
auth_header = {'Authorization': auth_header_value}
method = getattr(request, 'signed_method', request.method)
if ('oauth_token' in request.META['QUERY_STRING'] or
'oauth_token' in auth_header_value):
# This is 3-legged OAuth.
log.info('Trying 3 legged OAuth')
try:
valid, oauth_req = server.validate_protected_resource_request(
request.build_absolute_uri(),
http_method=method,
body=request.body,
headers=auth_header)
except ValueError:
log.warning('ValueError on verifying_request', exc_info=True)
return
if not valid:
log.warning(u'Cannot find APIAccess token with that key: %s'
% oauth_req.attempted_key)
return
uid = Token.objects.filter(
token_type=ACCESS_TOKEN,
key=oauth_req.resource_owner_key).values_list(
'user_id', flat=True)[0]
request.user = UserProfile.objects.select_related(
'user').get(pk=uid)
else:
# This is 2-legged OAuth.
log.info('Trying 2 legged OAuth')
try:
client_key = validate_2legged_oauth(
server,
request.build_absolute_uri(),
method, auth_header)
except TwoLeggedOAuthError, e:
log.warning(str(e))
return
except ValueError:
log.warning('ValueError on verifying_request', exc_info=True)
return
uid = Access.objects.filter(
key=client_key).values_list(
'user_id', flat=True)[0]
request.user = UserProfile.objects.select_related(
'user').get(pk=uid)
# But you cannot have one of these roles.
denied_groups = set(['Admins'])
roles = set(request.user.groups.values_list('name', flat=True))
if roles and roles.intersection(denied_groups):
log.info(u'Attempt to use API with denied role, user: %s'
% request.user.pk)
# Set request user back to Anonymous.
request.user = AnonymousUser()
return
if request.user.is_authenticated():
request.authed_from.append('RestOAuth')
log.info('Successful OAuth with user: %s' % request.user)
class TwoLeggedOAuthError(Exception):
pass
def validate_2legged_oauth(oauth, uri, method, auth_header):
"""
"Two-legged" OAuth authorization isn't standard and so not
supported by current versions of oauthlib. The implementation
here is sufficient for simple developer tools and testing. Real
usage of OAuth will always require directing the user to the
authorization page so that a resource-owner token can be
generated.
"""
req = Request(uri, method, '', auth_header)
typ, params, oauth_params = oauth._get_signature_type_and_params(req)
oauth_params = dict(oauth_params)
req.params = filter(lambda x: x[0] not in ("oauth_signature", "realm"),
params)
req.signature = oauth_params.get('oauth_signature')
req.client_key = oauth_params.get('oauth_consumer_key')
req.nonce = oauth_params.get('oauth_nonce')
req.timestamp = oauth_params.get('oauth_timestamp')
if oauth_params.get('oauth_signature_method').lower() != 'hmac-sha1':
raise TwoLeggedOAuthError(u'unsupported signature method ' +
oauth_params.get('oauth_signature_method'))
secret = validator.get_client_secret(req.client_key, req)
valid_signature = signature.verify_hmac_sha1(req, secret, None)
if valid_signature:
return req.client_key
else:
raise TwoLeggedOAuthError(
u'Cannot find APIAccess token with that key: %s'
% req.client_key)
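# Illustrative sketch only (not part of the original middleware): one way a
# client could produce the two-legged Authorization header that
# validate_2legged_oauth() above checks, using oauthlib's Client with the
# default HMAC-SHA1 signature and no resource-owner token. The key/secret
# arguments are placeholders, not real credentials.
def _example_sign_two_legged_request(url, client_key, client_secret):
    from oauthlib.oauth1 import Client
    client = Client(client_key, client_secret=client_secret)
    _uri, headers, _body = client.sign(url, http_method='GET')
    return headers['Authorization']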
class RestSharedSecretMiddleware(object):
def process_request(self, request):
# For now we only want these to apply to the API.
# This attribute is set in APIBaseMiddleware.
if not getattr(request, 'API', False):
return
# Set up authed_from attribute.
if not hasattr(request, 'authed_from'):
request.authed_from = []
header = request.META.get('HTTP_AUTHORIZATION', '').split(None, 1)
if header and header[0].lower() == 'mkt-shared-secret':
auth = header[1]
else:
auth = request.GET.get('_user')
if not auth:
log.info('API request made without shared-secret auth token')
return
try:
email, hm, unique_id = str(auth).split(',')
consumer_id = hashlib.sha1(
email + settings.SECRET_KEY).hexdigest()
matches = hmac.new(unique_id + settings.SECRET_KEY,
consumer_id, hashlib.sha512).hexdigest() == hm
if matches:
try:
request.user = UserProfile.objects.get(email=email)
request.authed_from.append('RestSharedSecret')
except UserProfile.DoesNotExist:
log.info('Auth token matches absent user (%s)' % email)
return
else:
log.info('Shared-secret auth token does not match')
return
log.info('Successful SharedSecret with user: %s' % request.user.pk)
return
except Exception, e:
log.info('Bad shared-secret auth data: %s (%s)', auth, e)
return
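# Illustrative sketch only (not part of the original middleware): the
# shared-secret token verified above is "email,hmac,unique_id", constructed
# with the same SECRET_KEY that RestSharedSecretMiddleware uses to check it.
def _example_build_shared_secret_token(email, unique_id, secret_key):
    consumer_id = hashlib.sha1(email + secret_key).hexdigest()
    hm = hmac.new(unique_id + secret_key, consumer_id,
                  hashlib.sha512).hexdigest()
    return '%s,%s,%s' % (email, hm, unique_id)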
# How long to set the time-to-live on the cache.
PINNING_SECONDS = int(getattr(settings, 'MULTIDB_PINNING_SECONDS', 15))
class APIPinningMiddleware(PinningRouterMiddleware):
"""
Similar to multidb, but we can't rely on cookies. Instead we cache the
users who are to be pinned with a cache timeout. Users who are to be
pinned are those that are not anonymous users and who are either making
an updating request or who are already in our cache as having done one
recently.
If not in the API, will fall back to the cookie pinning middleware.
Note: because the authentication process happens late when we are in the
API, process_request() will be manually called from authentication classes
when a user is successfully authenticated by one of those classes.
"""
def cache_key(self, request):
"""Returns cache key based on user ID."""
return u'api-pinning:%s' % request.user.id
def process_request(self, request):
if not getattr(request, 'API', False):
return super(APIPinningMiddleware, self).process_request(request)
if (request.user and not request.user.is_anonymous() and
(cache.get(self.cache_key(request)) or
request.method in ['DELETE', 'PATCH', 'POST', 'PUT'])):
statsd.incr('api.db.pinned')
pin_this_thread()
return
statsd.incr('api.db.unpinned')
unpin_this_thread()
def process_response(self, request, response):
if not getattr(request, 'API', False):
return (super(APIPinningMiddleware, self)
.process_response(request, response))
response['API-Pinned'] = str(this_thread_is_pinned())
if (request.user and not request.user.is_anonymous() and (
request.method in ['DELETE', 'PATCH', 'POST', 'PUT'] or
getattr(response, '_db_write', False))):
cache.set(self.cache_key(request), 1, PINNING_SECONDS)
return response
class CORSMiddleware(object):
def process_response(self, request, response):
        # This is mostly for use by tastypie, which doesn't really have a nice
        # hook for figuring out whether a response should have the CORS headers on
# it. That's because it will often error out with immediate HTTP
# responses.
response['Access-Control-Allow-Headers'] = ', '.join(
getattr(request, 'CORS_HEADERS',
('X-HTTP-Method-Override', 'Content-Type')))
error_allowed_methods = []
if response.status_code >= 300 and request.API:
error_allowed_methods = [request.method]
cors_allowed_methods = getattr(request, 'CORS', error_allowed_methods)
if cors_allowed_methods:
response['Access-Control-Allow-Origin'] = '*'
methods = [h.upper() for h in cors_allowed_methods]
if 'OPTIONS' not in methods:
methods.append('OPTIONS')
response['Access-Control-Allow-Methods'] = ', '.join(methods)
# The headers that the response will be able to access.
response['Access-Control-Expose-Headers'] = (
'API-Filter, API-Status, API-Version')
return response
v_re = re.compile('^/api/v(?P<version>\d+)/|^/api/|^/api$')
def detect_api_version(request):
url = request.META.get('PATH_INFO', '')
version = v_re.match(url).group('version')
if not version:
version = 1
return version
class APIBaseMiddleware(object):
"""
Detects if this is an API call, and figures out what version of the API
they are on. Maybe adds in a deprecation notice.
"""
def get_api(self, request):
if not hasattr(request, 'API'):
request.API = False
prefix, _, _ = request.get_full_path().lstrip('/').partition('/')
if prefix.lower() == 'api':
request.API = True
return request.API
def process_request(self, request):
if self.get_api(request):
version = detect_api_version(request)
request.API_VERSION = int(version)
def process_response(self, request, response):
if not self.get_api(request):
return response
version = getattr(request, 'API_VERSION', None)
if version is None:
version = detect_api_version(request)
response['API-Version'] = version
if version < settings.API_CURRENT_VERSION:
response['API-Status'] = 'Deprecated'
return response
class APIFilterMiddleware(object):
"""
Add an API-Filter header containing a urlencoded string of filters applied
to API requests.
"""
def process_response(self, request, response):
if getattr(request, 'API', False) and response.status_code < 500:
devices = []
for device in ('GAIA', 'TV', 'MOBILE', 'TABLET'):
if getattr(request, device, False):
devices.append(device.lower())
filters = (
('carrier', get_carrier() or ''),
('device', devices),
('lang', request.LANG),
('pro', request.GET.get('pro', '')),
('region', request.REGION.slug),
)
response['API-Filter'] = urlencode(filters, doseq=True)
patch_vary_headers(response, ['API-Filter'])
return response
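# Illustrative example of the header added above (all values are made up):
#   API-Filter: carrier=&device=gaia&device=mobile&lang=en&pro=&region=us
# i.e. urlencode() of the filters tuple, with doseq=True expanding the device
# list into repeated keys.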
class TimingMiddleware(GraphiteRequestTimingMiddleware):
"""
A wrapper around django_statsd timing middleware that sends different
statsd pings if being used in API.
"""
def process_view(self, request, *args):
if getattr(request, 'API', False):
TastyPieRequestTimingMiddleware().process_view(request, *args)
else:
super(TimingMiddleware, self).process_view(request, *args)
def _record_time(self, request):
pre = 'api' if getattr(request, 'API', False) else 'view'
if hasattr(request, '_start_time'):
ms = int((time.time() - request._start_time) * 1000)
data = {'method': request.method,
'module': request._view_module,
'name': request._view_name,
'pre': pre}
statsd.timing('{pre}.{module}.{name}.{method}'.format(**data), ms)
statsd.timing('{pre}.{module}.{method}'.format(**data), ms)
statsd.timing('{pre}.{method}'.format(**data), ms)
class GZipMiddleware(BaseGZipMiddleware):
"""
Wrapper around GZipMiddleware, which only enables gzip for API responses.
It specifically avoids enabling it for non-API responses because that might
leak security tokens through the BREACH attack.
https://www.djangoproject.com/weblog/2013/aug/06/breach-and-django/
http://breachattack.com/
https://bugzilla.mozilla.org/show_bug.cgi?id=960752
"""
def process_response(self, request, response):
if not getattr(request, 'API', False):
return response
return super(GZipMiddleware, self).process_response(request, response)
class AuthenticationMiddleware(BaseAuthenticationMiddleware):
"""
Wrapper around AuthenticationMiddleware, which only performs the django
session based auth for non-API requests.
"""
def process_request(self, request):
if getattr(request, 'API', False):
request.user = AnonymousUser()
else:
super(AuthenticationMiddleware, self).process_request(request)
METHOD_OVERRIDE_HEADER = 'HTTP_X_HTTP_METHOD_OVERRIDE'
class MethodOverrideMiddleware(object):
def process_view(self, request, callback, callback_args, callback_kwargs):
if request.method != 'POST':
return
if METHOD_OVERRIDE_HEADER not in request.META:
return
request.method = request.META[METHOD_OVERRIDE_HEADER]
| washort/zamboni | mkt/api/middleware.py | Python | bsd-3-clause | 15,987 |
'''OpenGL extension AMD.transform_feedback3_lines_triangles
This module customises the behaviour of the
OpenGL.raw.GL.AMD.transform_feedback3_lines_triangles to provide a more
Python-friendly API
Overview (from the spec)
OpenGL 4.0 introduced the ability to record primitives into multiple output
streams using transform feedback. However, the restriction that all streams
must output POINT primitives when more than one output stream is active was
also introduced. This extension simply removes that restriction, allowing
the same set of primitives to be used with multiple transform feedback
streams as with a single stream.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/AMD/transform_feedback3_lines_triangles.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.AMD.transform_feedback3_lines_triangles import *
from OpenGL.raw.GL.AMD.transform_feedback3_lines_triangles import _EXTENSION_NAME
def glInitTransformFeedback3LinesTrianglesAMD():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/GL/AMD/transform_feedback3_lines_triangles.py | Python | lgpl-3.0 | 1,320 |
from textwrap import dedent
from peru import plugin
import shared
def assert_parallel(n):
# The plugin module keep a global counter of all the jobs that run in
# parallel, so that we can write these tests.

if plugin.DEBUG_PARALLEL_MAX != n:
raise AssertionError('Expected {} parallel {}. Counted {}.'.format(
n, 'job' if n == 1 else 'jobs', plugin.DEBUG_PARALLEL_MAX))
class ParallelismTest(shared.PeruTest):
def setUp(self):
# Make sure nothing is fishy with the jobs counter, and reset the max.
plugin.debug_assert_clean_parallel_count()
plugin.DEBUG_PARALLEL_MAX = 0
def tearDown(self):
# Make sure nothing is fishy with the jobs counter. No sense in
# resetting the max here, because the rest of our tests don't know to
# reset it anyway.
plugin.debug_assert_clean_parallel_count()
def test_two_jobs_in_parallel(self):
# This just checks that two different modules can actually be fetched
# in parallel.
foo = shared.create_dir()
bar = shared.create_dir()
peru_yaml = dedent('''\
imports:
foo: ./
bar: ./
cp module foo:
path: {}
cp module bar:
path: {}
'''.format(foo, bar))
test_dir = shared.create_dir({'peru.yaml': peru_yaml})
shared.run_peru_command(['sync'], test_dir)
assert_parallel(2)
def test_jobs_flag(self):
# This checks that the --jobs flag is respected, even when two modules
# could have been fetched in parallel.
foo = shared.create_dir()
bar = shared.create_dir()
peru_yaml = dedent('''\
imports:
foo: ./
bar: ./
cp module foo:
path: {}
cp module bar:
path: {}
'''.format(foo, bar))
test_dir = shared.create_dir({'peru.yaml': peru_yaml})
shared.run_peru_command(['sync', '-j1'], test_dir)
assert_parallel(1)
def test_identical_fields(self):
# This checks that modules with identical fields are not fetched in
# parallel. This is the same logic that protects us from fetching a
# given module twice, like when it's imported with two different named
# rules.
foo = shared.create_dir()
peru_yaml = dedent('''\
imports:
foo1: ./
foo2: ./
cp module foo1:
path: {}
cp module foo2:
path: {}
'''.format(foo, foo))
test_dir = shared.create_dir({'peru.yaml': peru_yaml})
shared.run_peru_command(['sync'], test_dir)
assert_parallel(1)
def test_identical_plugin_cache_fields(self):
# Plugins that use caching also need to avoid running in parallel, if
# their cache directories are the same. The noop_cache plugin (created
# for this test) uses the path field (but not the nonce field) in its
# plugin cache key. Check that these two modules are not fetched in
# parallel, even though their module fields aren't exactly the same.
foo = shared.create_dir()
peru_yaml = dedent('''\
imports:
foo1: ./
foo2: ./
noop_cache module foo1:
path: {}
# nonce is ignored, but it makes foo1 different from foo2 as
# far as the module cache is concerned
nonce: '1'
noop_cache module foo2:
path: {}
nonce: '2'
'''.format(foo, foo))
test_dir = shared.create_dir({'peru.yaml': peru_yaml})
shared.run_peru_command(['sync'], test_dir)
assert_parallel(1)
| olson-sean-k/peru | tests/test_parallelism.py | Python | mit | 3,868 |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 04 15:49:07 2016
@author: alr
"""
from pyAPP6Tools import MissionOptimization
from pyAPP6Tools.MissionOptimization import segmentParameter, resFunctionMinimizeEndValue, missionObjective, updateEndCondition
if __name__ == "__main__":
misfile = r'A320_optFuelBurn.mis'
segParList = [segmentParameter(3,154.33,updateEndCondition),
segmentParameter(4,0.78,updateEndCondition),
segmentParameter(5,9144,updateEndCondition),
segmentParameter(7,154.33,updateEndCondition)]
obj = missionObjective('Fuel Mass',resFunctionMinimizeEndValue,mode='max')
res = MissionOptimization.optimizeMission(misfile=misfile, segParList=segParList, misObjective=obj)
| ALR-Aerospace/pyAPP6Tools | Examples/MissionOptimization/optimize_A320_optFuelBurn.py | Python | mit | 802 |
"""Constants for the DLNA DMR component."""
from __future__ import annotations
from collections.abc import Mapping
import logging
from typing import Final
from async_upnp_client.profiles.dlna import PlayMode as _PlayMode
from homeassistant.components.media_player import const as _mp_const
LOGGER = logging.getLogger(__package__)
DOMAIN: Final = "dlna_dmr"
CONF_LISTEN_PORT: Final = "listen_port"
CONF_CALLBACK_URL_OVERRIDE: Final = "callback_url_override"
CONF_POLL_AVAILABILITY: Final = "poll_availability"
DEFAULT_NAME: Final = "DLNA Digital Media Renderer"
CONNECT_TIMEOUT: Final = 10
PROTOCOL_HTTP: Final = "http-get"
PROTOCOL_RTSP: Final = "rtsp-rtp-udp"
PROTOCOL_ANY: Final = "*"
STREAMABLE_PROTOCOLS: Final = [PROTOCOL_HTTP, PROTOCOL_RTSP, PROTOCOL_ANY]
# Map UPnP class to media_player media_content_type
MEDIA_TYPE_MAP: Mapping[str, str] = {
"object": _mp_const.MEDIA_TYPE_URL,
"object.item": _mp_const.MEDIA_TYPE_URL,
"object.item.imageItem": _mp_const.MEDIA_TYPE_IMAGE,
"object.item.imageItem.photo": _mp_const.MEDIA_TYPE_IMAGE,
"object.item.audioItem": _mp_const.MEDIA_TYPE_MUSIC,
"object.item.audioItem.musicTrack": _mp_const.MEDIA_TYPE_MUSIC,
"object.item.audioItem.audioBroadcast": _mp_const.MEDIA_TYPE_MUSIC,
"object.item.audioItem.audioBook": _mp_const.MEDIA_TYPE_PODCAST,
"object.item.videoItem": _mp_const.MEDIA_TYPE_VIDEO,
"object.item.videoItem.movie": _mp_const.MEDIA_TYPE_MOVIE,
"object.item.videoItem.videoBroadcast": _mp_const.MEDIA_TYPE_TVSHOW,
"object.item.videoItem.musicVideoClip": _mp_const.MEDIA_TYPE_VIDEO,
"object.item.playlistItem": _mp_const.MEDIA_TYPE_PLAYLIST,
"object.item.textItem": _mp_const.MEDIA_TYPE_URL,
"object.item.bookmarkItem": _mp_const.MEDIA_TYPE_URL,
"object.item.epgItem": _mp_const.MEDIA_TYPE_EPISODE,
"object.item.epgItem.audioProgram": _mp_const.MEDIA_TYPE_EPISODE,
"object.item.epgItem.videoProgram": _mp_const.MEDIA_TYPE_EPISODE,
"object.container": _mp_const.MEDIA_TYPE_PLAYLIST,
"object.container.person": _mp_const.MEDIA_TYPE_ARTIST,
"object.container.person.musicArtist": _mp_const.MEDIA_TYPE_ARTIST,
"object.container.playlistContainer": _mp_const.MEDIA_TYPE_PLAYLIST,
"object.container.album": _mp_const.MEDIA_TYPE_ALBUM,
"object.container.album.musicAlbum": _mp_const.MEDIA_TYPE_ALBUM,
"object.container.album.photoAlbum": _mp_const.MEDIA_TYPE_ALBUM,
"object.container.genre": _mp_const.MEDIA_TYPE_GENRE,
"object.container.genre.musicGenre": _mp_const.MEDIA_TYPE_GENRE,
"object.container.genre.movieGenre": _mp_const.MEDIA_TYPE_GENRE,
"object.container.channelGroup": _mp_const.MEDIA_TYPE_CHANNELS,
"object.container.channelGroup.audioChannelGroup": _mp_const.MEDIA_TYPE_CHANNELS,
"object.container.channelGroup.videoChannelGroup": _mp_const.MEDIA_TYPE_CHANNELS,
"object.container.epgContainer": _mp_const.MEDIA_TYPE_TVSHOW,
"object.container.storageSystem": _mp_const.MEDIA_TYPE_PLAYLIST,
"object.container.storageVolume": _mp_const.MEDIA_TYPE_PLAYLIST,
"object.container.storageFolder": _mp_const.MEDIA_TYPE_PLAYLIST,
"object.container.bookmarkFolder": _mp_const.MEDIA_TYPE_PLAYLIST,
}
# Map media_player media_content_type to UPnP class. Not everything will map
# directly, in which case it's not specified and other defaults will be used.
MEDIA_UPNP_CLASS_MAP: Mapping[str, str] = {
_mp_const.MEDIA_TYPE_ALBUM: "object.container.album.musicAlbum",
_mp_const.MEDIA_TYPE_ARTIST: "object.container.person.musicArtist",
_mp_const.MEDIA_TYPE_CHANNEL: "object.item.videoItem.videoBroadcast",
_mp_const.MEDIA_TYPE_CHANNELS: "object.container.channelGroup",
_mp_const.MEDIA_TYPE_COMPOSER: "object.container.person.musicArtist",
_mp_const.MEDIA_TYPE_CONTRIBUTING_ARTIST: "object.container.person.musicArtist",
_mp_const.MEDIA_TYPE_EPISODE: "object.item.epgItem.videoProgram",
_mp_const.MEDIA_TYPE_GENRE: "object.container.genre",
_mp_const.MEDIA_TYPE_IMAGE: "object.item.imageItem",
_mp_const.MEDIA_TYPE_MOVIE: "object.item.videoItem.movie",
_mp_const.MEDIA_TYPE_MUSIC: "object.item.audioItem.musicTrack",
_mp_const.MEDIA_TYPE_PLAYLIST: "object.item.playlistItem",
_mp_const.MEDIA_TYPE_PODCAST: "object.item.audioItem.audioBook",
_mp_const.MEDIA_TYPE_SEASON: "object.item.epgItem.videoProgram",
_mp_const.MEDIA_TYPE_TRACK: "object.item.audioItem.musicTrack",
_mp_const.MEDIA_TYPE_TVSHOW: "object.item.videoItem.videoBroadcast",
_mp_const.MEDIA_TYPE_URL: "object.item.bookmarkItem",
_mp_const.MEDIA_TYPE_VIDEO: "object.item.videoItem",
}
# Translation of MediaMetadata keys to DIDL-Lite keys.
# See https://developers.google.com/cast/docs/reference/messages#MediaData via
# https://www.home-assistant.io/integrations/media_player/ for HA keys.
# See http://www.upnp.org/specs/av/UPnP-av-ContentDirectory-v4-Service.pdf for
# DIDL-Lite keys.
MEDIA_METADATA_DIDL: Mapping[str, str] = {
"subtitle": "longDescription",
"releaseDate": "date",
"studio": "publisher",
"season": "episodeSeason",
"episode": "episodeNumber",
"albumName": "album",
"trackNumber": "originalTrackNumber",
}
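# Illustrative sketch (an assumption, not the integration's actual code): keys
# from Home Assistant media metadata can be renamed to DIDL-Lite attributes by
# filtering through the mapping above, dropping keys with no translation.
def _example_translate_metadata(metadata: Mapping[str, object]) -> dict[str, object]:
    """Rename known MediaMetadata keys to DIDL-Lite names, dropping the rest."""
    return {
        MEDIA_METADATA_DIDL[key]: value
        for key, value in metadata.items()
        if key in MEDIA_METADATA_DIDL
    }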
# For (un)setting repeat mode, map a combination of shuffle & repeat to a list
# of play modes in order of suitability. Fall back to _PlayMode.NORMAL in any
# case. NOTE: This list is slightly different to that in SHUFFLE_PLAY_MODES,
# due to fallback behaviour when turning on repeat modes.
REPEAT_PLAY_MODES: Mapping[tuple[bool, str], list[_PlayMode]] = {
(False, _mp_const.REPEAT_MODE_OFF): [
_PlayMode.NORMAL,
],
(False, _mp_const.REPEAT_MODE_ONE): [
_PlayMode.REPEAT_ONE,
_PlayMode.REPEAT_ALL,
_PlayMode.NORMAL,
],
(False, _mp_const.REPEAT_MODE_ALL): [
_PlayMode.REPEAT_ALL,
_PlayMode.REPEAT_ONE,
_PlayMode.NORMAL,
],
(True, _mp_const.REPEAT_MODE_OFF): [
_PlayMode.SHUFFLE,
_PlayMode.RANDOM,
_PlayMode.NORMAL,
],
(True, _mp_const.REPEAT_MODE_ONE): [
_PlayMode.REPEAT_ONE,
_PlayMode.RANDOM,
_PlayMode.SHUFFLE,
_PlayMode.NORMAL,
],
(True, _mp_const.REPEAT_MODE_ALL): [
_PlayMode.RANDOM,
_PlayMode.REPEAT_ALL,
_PlayMode.SHUFFLE,
_PlayMode.NORMAL,
],
}
# For (un)setting shuffle mode, map a combination of shuffle & repeat to a list
# of play modes in order of suitability. Fall back to _PlayMode.NORMAL in any
# case.
SHUFFLE_PLAY_MODES: Mapping[tuple[bool, str], list[_PlayMode]] = {
(False, _mp_const.REPEAT_MODE_OFF): [
_PlayMode.NORMAL,
],
(False, _mp_const.REPEAT_MODE_ONE): [
_PlayMode.REPEAT_ONE,
_PlayMode.REPEAT_ALL,
_PlayMode.NORMAL,
],
(False, _mp_const.REPEAT_MODE_ALL): [
_PlayMode.REPEAT_ALL,
_PlayMode.REPEAT_ONE,
_PlayMode.NORMAL,
],
(True, _mp_const.REPEAT_MODE_OFF): [
_PlayMode.SHUFFLE,
_PlayMode.RANDOM,
_PlayMode.NORMAL,
],
(True, _mp_const.REPEAT_MODE_ONE): [
_PlayMode.RANDOM,
_PlayMode.SHUFFLE,
_PlayMode.REPEAT_ONE,
_PlayMode.NORMAL,
],
(True, _mp_const.REPEAT_MODE_ALL): [
_PlayMode.RANDOM,
_PlayMode.SHUFFLE,
_PlayMode.REPEAT_ALL,
_PlayMode.NORMAL,
],
}
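# Illustrative sketch (an assumption, not the integration's actual code): the
# lists above are intended to be scanned in order against the play modes a
# device actually supports, falling back to NORMAL when nothing matches.
def _example_pick_repeat_play_mode(
    shuffle: bool, repeat: str, valid_modes: set[_PlayMode]
) -> _PlayMode:
    """Return the first suitable play mode for a (shuffle, repeat) request."""
    for candidate in REPEAT_PLAY_MODES.get((shuffle, repeat), []):
        if candidate in valid_modes:
            return candidate
    return _PlayMode.NORMAL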
| rohitranjan1991/home-assistant | homeassistant/components/dlna_dmr/const.py | Python | mit | 7,362 |
from django.core.management.base import BaseCommand
from tenant_only.models import TableTwo
class Command(BaseCommand):
help = 'Test table two'
def add_arguments(self, parser):
parser.add_argument('--id', nargs='+', type=int)
def handle(self, *args, **options):
print(options['id'])
table_two = TableTwo.objects.filter(pk__in=options['id'])
print(table_two)
| tomturner/django-tenants | examples/tenant_tutorial/tenant_only/management/commands/dtest.py | Python | mit | 409 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import smart_selects.db_fields
class Migration(migrations.Migration):
dependencies = [
('lugar', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='DiasEfectivoLLuvia',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mes', models.IntegerField(choices=[(1, b'Enero'), (2, b'Febrero'), (3, b'Marzo'), (4, b'Abril'), (5, b'Mayo'), (6, b'Junio'), (7, b'Julio'), (8, b'Agosto'), (9, b'Septiembre'), (10, b'Octubre'), (11, b'Noviembre'), (12, b'Diciembre')])),
('year', models.IntegerField(verbose_name=b'A\xc3\xb1o')),
('dias_lluvia', models.FloatField()),
('comunidad', models.ForeignKey(to='lugar.Comunidad')),
('departamento', models.ForeignKey(to='lugar.Departamento')),
('municipio', models.ForeignKey(to='lugar.Municipio')),
('pais', models.ForeignKey(to='lugar.Pais')),
],
options={
'verbose_name': 'Dias Efectivo de LLuvia',
'verbose_name_plural': 'Dias Efectivo de LLuvia',
},
),
migrations.CreateModel(
name='Precipitacion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mes', models.IntegerField(choices=[(1, b'Enero'), (2, b'Febrero'), (3, b'Marzo'), (4, b'Abril'), (5, b'Mayo'), (6, b'Junio'), (7, b'Julio'), (8, b'Agosto'), (9, b'Septiembre'), (10, b'Octubre'), (11, b'Noviembre'), (12, b'Diciembre')])),
('year', models.IntegerField(verbose_name=b'A\xc3\xb1o')),
('precipitacion', models.FloatField()),
('total_precipitacion', models.FloatField(editable=False)),
('comunidad', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'municipio', chained_field=b'municipio', blank=True, auto_choose=True, to='lugar.Comunidad', null=True)),
('departamento', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'pais', chained_field=b'pais', auto_choose=True, to='lugar.Departamento')),
('municipio', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'departamento', chained_field=b'departamento', auto_choose=True, to='lugar.Municipio')),
('pais', models.ForeignKey(to='lugar.Pais')),
],
options={
'verbose_name': 'Precipitaci\xf3n',
'verbose_name_plural': 'Precipitaci\xf3n',
},
),
migrations.CreateModel(
name='Temperatura',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mes', models.IntegerField(choices=[(1, b'Enero'), (2, b'Febrero'), (3, b'Marzo'), (4, b'Abril'), (5, b'Mayo'), (6, b'Junio'), (7, b'Julio'), (8, b'Agosto'), (9, b'Septiembre'), (10, b'Octubre'), (11, b'Noviembre'), (12, b'Diciembre')])),
('year', models.IntegerField(verbose_name=b'A\xc3\xb1o')),
('temperatura', models.FloatField()),
('total_temperatura', models.FloatField(editable=False)),
('comunidad', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'municipio', chained_field=b'municipio', blank=True, auto_choose=True, to='lugar.Comunidad', null=True)),
('departamento', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'pais', chained_field=b'pais', auto_choose=True, to='lugar.Departamento')),
('municipio', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'departamento', chained_field=b'departamento', auto_choose=True, to='lugar.Municipio')),
('pais', models.ForeignKey(to='lugar.Pais')),
],
options={
'verbose_name': 'Temperatura',
'verbose_name_plural': 'Temperatura',
},
),
]
| shiminasai/mapafinca | clima/migrations/0001_initial.py | Python | mit | 4,223 |
"""Contains classes for refinement engines. Refinery is the shared interface,
LevenbergMarquardtIterations, GaussNewtonIterations, SimpleLBFGS and LBFGScurvs
are the current concrete implementations"""
from __future__ import annotations
import copy
import json
import logging
from io import StringIO
from typing import List, Union
import libtbx
from libtbx import easy_mp
from libtbx.phil import parse
from scitbx import lbfgs, sparse
from scitbx.array_family import flex
from scitbx.lstbx import normal_eqns, normal_eqns_solving
from dials.algorithms.refinement import DialsRefineRuntimeError
from .target import Target
logger = logging.getLogger(__name__)
# termination reason strings
TARGET_ACHIEVED = "RMSD target achieved"
RMSD_CONVERGED = "RMSD no longer decreasing"
STEP_TOO_SMALL = "Step too small"
OBJECTIVE_INCREASE = "Refinement failure: objective increased"
MAX_ITERATIONS = "Reached maximum number of iterations"
MAX_TRIAL_ITERATIONS = "Reached maximum number of consecutive unsuccessful trial steps"
DOF_TOO_LOW = "Not enough degrees of freedom to refine"
refinery_phil_str = """
refinery
.help = "Parameters to configure the refinery"
.expert_level = 1
{
engine = SimpleLBFGS LBFGScurvs GaussNewton *LevMar SparseLevMar
.help = "The minimisation engine to use"
.type = choice
max_iterations = None
.help = "Maximum number of iterations in refinement before termination."
"None implies the engine supplies its own default."
.type = int(value_min=1)
log = None
.help = "Filename for an optional log that a minimisation engine may use"
"to write additional information"
.type = path
journal
.help = "Extra items to track in the refinement history"
{
track_step = False
.help = "Record parameter shifts history in the refinement journal, if"
"the engine supports it."
.type = bool
track_gradient = False
.help = "Record parameter gradients history in the refinement journal, if"
"the engine supports it."
.type = bool
track_parameter_correlation = False
.help = "Record correlation matrix between columns of the Jacobian for"
"each step of refinement."
.type = bool
track_condition_number = False
.help = "Record condition number of the Jacobian for each step of "
"refinement."
.type = bool
track_out_of_sample_rmsd = False
.type = bool
.help = "Record RMSDs calculated using the refined experiments with"
"reflections not used in refinement at each step. Only valid if a"
"subset of input reflections was taken for refinement"
}
}
"""
refinery_phil_scope = parse(refinery_phil_str)
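# A minimal usage sketch (assuming the standard libtbx.phil workflow; this
# helper is an illustration, not code the refinery itself uses): user overrides
# are merged into the scope above, and extract() yields the object read
# elsewhere as e.g. params.refinery.engine or params.refinery.journal.
def _example_refinery_params(overrides="refinery.engine = SparseLevMar"):
    """Illustration only: merge a user PHIL fragment and extract parameters."""
    working_scope = refinery_phil_scope.fetch(source=parse(overrides))
    return working_scope.extract()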
class Journal(dict):
"""Container in which to store information about refinement history.
This is simply a dict but provides some extra methods for access that
maintain values as columns in a table. Refinery classes will use these methods
while entering data to ensure the table remains consistent. Methods inherited
from dict are not hidden for ease of use of this object when returned to the
user."""
reason_for_termination = None
_nrows = 0
def get_nrows(self):
return self._nrows
def add_column(self, key):
"""Add a new column named by key"""
self[key] = [None] * self._nrows
def add_row(self):
"""Add an element to the end of each of the columns. Fail if any columns
are the wrong length"""
for k in self:
assert len(self[k]) == self._nrows
self[k].append(None)
self._nrows += 1
def del_last_row(self):
"""Delete the last element from the each of the columns. Fail if any columns
are the wrong length"""
if self._nrows == 0:
return None
for k in self:
assert len(self[k]) == self._nrows
self[k].pop()
self._nrows -= 1
def set_last_cell(self, key, value):
"""Set last cell in column given by key to value. Fail if the column is the
wrong length"""
assert len(self[key]) == self._nrows
self[key][-1] = value
def to_json_file(self, filename):
d = {"attributes": self.__dict__, "data": dict(self)}
with open(filename, "w") as f:
json.dump(d, f)
@classmethod
def from_json_file(cls, filename):
with open(filename) as f:
d = json.load(f)
j = cls()
j.update(d["data"])
for key in d["attributes"]:
setattr(j, key, d["attributes"][key])
return j
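# A minimal usage sketch of Journal (illustration only; in practice Refinery
# populates it column-wise via update_journal() below):
def _example_journal_usage():
    """Illustration only: record one refinement step in a fresh Journal."""
    journal = Journal()
    journal.add_column("objective")
    journal.add_row()
    journal.set_last_cell("objective", 0.5)
    return journal.get_nrows()  # 1 row recorded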
class Refinery:
"""Interface for Refinery objects. This should be subclassed and the run
method implemented."""
# NOTES. A Refinery is initialised with a Target function. The target
# function already contains a ReflectionManager (which holds the data) so
# there's no need to pass the data in here. In fact the Target
# class does the bulk of the work, as it also does the reflection prediction
# to get the updated predictions on each cycle. This should make some sense
# as the target function is inextricably linked to the space in which
# predictions are made (e.g. detector space, phi), so it is not general
# enough to sit abstractly above the prediction.
# This keeps the Refinery simple and able to be focused only on generic
# features of managing a refinement run, like reporting results and checking
# termination criteria.
# The prediction values come from a PredictionParameterisation object.
# This is also referred to by the Target function, but it makes sense for
# Refinery to be able to refer to it directly. So refinery should keep a
# separate link to its PredictionParameterisation.
def __init__(
self,
target: Target,
prediction_parameterisation,
constraints_manager=None,
log=None,
tracking=None,
max_iterations=None,
):
# reference to PredictionParameterisation, Target and ConstraintsManager
# objects
self._parameters = prediction_parameterisation
self._target = target
self._constr_manager = constraints_manager
# initial parameter values
self.x = flex.double(self._parameters.get_param_vals())
if self._constr_manager is not None:
self.x = self._constr_manager.constrain_parameters(self.x)
self.old_x = None
# undefined initial functional and gradients values
self._f = None
self._g = None
self._jacobian: Union[flex.double, sparse.matrix, None] = None
# filename for an optional log file
self._log = log
self._target_achieved = False
self._max_iterations = max_iterations
# attributes for journalling functionality, based on lstbx's
# journaled_non_linear_ls class
if tracking is None:
# set default tracking
tracking = refinery_phil_scope.extract().refinery.journal
self.history = Journal()
self.history.add_column("num_reflections")
self.history.add_column("objective") # flex.double()
if tracking.track_gradient:
self.history.add_column("gradient")
self.history.add_column("gradient_norm") # flex.double()
if tracking.track_parameter_correlation:
self.history.add_column("parameter_correlation")
if tracking.track_step:
self.history.add_column("solution")
if tracking.track_out_of_sample_rmsd:
self.history.add_column("out_of_sample_rmsd")
self.history.add_column("solution_norm") # flex.double()
self.history.add_column("parameter_vector")
self.history.add_column("parameter_vector_norm") # flex.double()
self.history.add_column("rmsd")
if tracking.track_condition_number:
self.history.add_column("condition_number")
# number of processes to use, for engines that support multiprocessing
self._nproc = 1
self.prepare_for_step()
def get_num_steps(self):
return self.history.get_nrows() - 1
def prepare_for_step(self):
"""Update the parameterisation and prepare the target function"""
x = self.x
if self._constr_manager is not None:
x = self._constr_manager.expand_parameters(x)
# set current parameter values
self._parameters.set_param_vals(x)
# do reflection prediction
self._target.predict()
def update_journal(self):
"""Append latest step information to the journal attributes"""
# add step quantities to journal
self.history.add_row()
self.history.set_last_cell("num_reflections", self._target.get_num_matches())
self.history.set_last_cell("rmsd", self._target.rmsds())
self.history.set_last_cell(
"parameter_vector", self._parameters.get_param_vals()
)
self.history.set_last_cell("objective", self._f)
if "gradient" in self.history:
self.history.set_last_cell("gradient", self._g)
if "parameter_correlation" in self.history and self._jacobian is not None:
resid_names = [s.replace("RMSD_", "") for s in self._target.rmsd_names]
# split Jacobian into dense matrix blocks corresponding to each residual
jblocks = self.split_jacobian_into_blocks()
corrmats = {}
for r, j in zip(resid_names, jblocks):
corrmats[r] = self._packed_corr_mat(j)
self.history.set_last_cell("parameter_correlation", corrmats)
if "condition_number" in self.history and self._jacobian is not None:
self.history.set_last_cell(
"condition_number", self.jacobian_condition_number()
)
if "out_of_sample_rmsd" in self.history:
preds = self._target.predict_for_free_reflections()
self.history.set_last_cell(
"out_of_sample_rmsd", self._target.rmsds_for_reflection_table(preds)
)
def split_jacobian_into_blocks(self) -> List[flex.double]:
"""Split the Jacobian into blocks each corresponding to a separate
residual, converting sparse to flex.double if appropriate"""
nblocks = len(self._target.rmsd_names)
try:
# The Jacobian might be a sparse matrix
j: flex.double = self._jacobian.as_dense_matrix()
except AttributeError:
j = self._jacobian
nr, nc = j.all()
nr_block = int(nr / nblocks)
row_start = [e * nr_block for e in range(nblocks)]
blocks = [j.matrix_copy_block(rs, 0, nr_block, nc) for rs in row_start]
return blocks
@staticmethod
def _packed_corr_mat(m: flex.double) -> List[float]:
"""Return a list containing the upper diagonal values of the
        correlation matrix calculated between columns of the 2D flex.double
        matrix m"""
_, nc = m.all()
tmp = []
for col1 in range(nc):
for col2 in range(col1, nc):
if col1 == col2:
tmp.append(1.0)
else:
# Avoid spuriously high correlation between a column that should be
# zero (such as the gradient of X residuals wrt the Shift2 parameter)
# and another column (such as the gradient of X residuals wrt the
# Dist parameter) by rounding values to 15 places. It seems that such
# spurious correlations may occur in cases where gradients are
# calculated to be zero by matrix operations, rather than set to zero.
v1 = m.matrix_copy_column(col1).round(15)
v2 = m.matrix_copy_column(col2).round(15)
tmp.append(flex.linear_correlation(v1, v2).coefficient())
return tmp
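    # Worked example of the packed layout (illustration only): for a Jacobian
    # with 3 columns the coefficients are stored row-major over the upper
    # triangle, i.e. in the order
    #   (0,0), (0,1), (0,2), (1,1), (1,2), (2,2)
    # giving 6 values for a 3x3 symmetric correlation matrix, with diagonal
    # entries always 1.0. get_correlation_matrix_for_step below rebuilds the
    # full nparam x nparam matrix from this packed list.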
def get_correlation_matrix_for_step(self, step):
"""For each type of residual (e.g. X, Y, Phi), decompress and return the
full 2D correlation matrix between columns of the Jacobian that was
stored in the journal at the given step number. If not available, return
None"""
if "parameter_correlation" not in self.history:
return None
try:
packed_mats = self.history["parameter_correlation"][step]
except IndexError:
return None
if packed_mats is None:
return None
packed_mats = copy.deepcopy(packed_mats)
nparam = len(self._parameters)
for k, v in packed_mats.items():
corr_mat = flex.double(flex.grid(nparam, nparam))
i = 0
for row in range(nparam):
for col in range(row, nparam):
corr_mat[row, col] = v[i]
i += 1
corr_mat.matrix_copy_upper_to_lower_triangle_in_place()
packed_mats[k] = corr_mat
return packed_mats
def jacobian_condition_number(self):
"""Calculate the condition number of the Jacobian, for tracking in the
refinement journal, if requested. The condition number of a matrix A is
defined as cond(A) = ||A|| ||inv(A)||. For a rectangular matrix the inverse
operation refers to the Moore-Penrose pseudoinverse. Various matrix norms
        can be used, resulting in numerically different condition numbers; however,
the 2-norm is commonly used. In that case, the definition is equivalent
to the ratio of the largest to smallest singular values of the matrix:
        cond(A) = sig_max(A) / sig_min(A). That is the calculation that is performed
here.
The condition number is a measure of how accurate the solution x to the
equation Ax = b will be. Essentially it measures how errors are amplified
through the linear equation. The condition number is large in the case that
the columns of A are nearly linearly-dependent (and infinite for a singular
matrix). We use it here then to detect situations where the correlation
between effects of different parameter shifts becomes large and therefore
refinement is problematic.
Note, the Jacobian used here does not include any additional rows due to
restraints terms that might be applied, or any parameter reduction due to
constraints. Therefore this condition number relates to the pure linearised
(Gauss-Newton) step, which might not actually be what the refinement engine
uses. It can be indicative of issues in the fundamental set up of the least
squares problem, even if these issues are avoided in practice (e.g. by
use of an algorithm like Levenberg-Marquardt, inclusion of restraints or
parameter reduction).
"""
try:
# The Jacobian might be a sparse matrix
j = self._jacobian.as_dense_matrix().deep_copy()
except AttributeError:
j = self._jacobian.deep_copy()
from scitbx.linalg.svd import real as svd_real
svd = svd_real(j, False, False)
# The condition number is the ratio of the largest to the smallest singular
# values of the matrix
return max(svd.sigma) / min(svd.sigma)
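    # Worked example (illustration only): for the diagonal matrix
    #   A = [[1.0, 0.0 ],
    #        [0.0, 0.01]]
    # the singular values are 1.0 and 0.01, so cond(A) = 1.0 / 0.01 = 100.
    # Values this large indicate nearly linearly-dependent Jacobian columns
    # and therefore poorly determined parameter shifts.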
def test_for_termination(self):
"""Return True if refinement should be terminated"""
# Basic version delegate to the Target class. Derived classes may
# implement other termination criteria
self._target_achieved = self._target.achieved()
return self._target_achieved
def test_rmsd_convergence(self):
"""Test for convergence of RMSDs"""
# http://en.wikipedia.org/wiki/
# Non-linear_least_squares#Convergence_criteria
try:
r1 = self.history["rmsd"][-1]
r2 = self.history["rmsd"][-2]
except IndexError:
return False
tests = [
abs((e[1] - e[0]) / e[1]) < 0.0001 if e[1] > 0 else True
for e in zip(r1, r2)
]
return all(tests)
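    # Worked example (illustration only): if the previous RMSDs were
    # (0.50, 0.40, 0.010) and the latest are (0.49996, 0.39997, 0.0099999),
    # each relative change is below 0.0001, so the test reports convergence.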
def test_objective_increasing_but_not_nref(self):
"""Test for an increase in the objective value between steps. This
could be caused simply by the number of matches between observations
and predictions increasing. However, if the number of matches stayed
the same or reduced then this is a bad sign."""
try:
l1 = self.history["objective"][-1]
l2 = self.history["objective"][-2]
n1 = self.history["num_reflections"][-1]
n2 = self.history["num_reflections"][-2]
except IndexError:
return False
return l1 > l2 and n1 <= n2
def set_nproc(self, nproc):
"""Set number of processors for multiprocessing. Override in derived classes
if a policy dictates that this must not be user-controlled"""
self._nproc = nproc
def run(self):
"""
To be implemented by derived class. It is expected that each step of
refinement be preceded by a call to prepare_for_step and followed by
calls to update_journal and test_for_termination (in that order).
"""
# Specify a minimizer and its parameters, and run
raise NotImplementedError()
class DisableMPmixin:
"""A mixin class that disables setting of nproc for multiprocessing"""
def set_nproc(self, nproc):
if nproc != 1:
raise NotImplementedError()
class AdaptLbfgs(Refinery):
"""Adapt Refinery for L-BFGS minimiser"""
def __init__(self, *args, **kwargs):
Refinery.__init__(self, *args, **kwargs)
self._termination_params = lbfgs.termination_parameters(
max_iterations=self._max_iterations
)
self._log_string = StringIO
def compute_functional_and_gradients(self):
L, dL_dp, _ = self.compute_functional_gradients_and_curvatures()
self._f = L
self._g = dL_dp
return self._f, self._g
def compute_functional_gradients_and_curvatures(self):
self.prepare_for_step()
# observation terms
blocks = self._target.split_matches_into_blocks(nproc=self._nproc)
if self._nproc > 1:
task_results = easy_mp.parallel_map(
func=self._target.compute_functional_gradients_and_curvatures,
iterable=blocks,
processes=self._nproc,
method="multiprocessing",
preserve_exception_message=True,
)
else:
task_results = [
self._target.compute_functional_gradients_and_curvatures(block)
for block in blocks
]
# reduce blockwise results
flist, glist, clist = zip(*task_results)
f = sum(flist)
g = [sum(g) for g in zip(*glist)]
c = [sum(c) for c in zip(*clist)]
# restraints terms
restraints = (
self._target.compute_restraints_functional_gradients_and_curvatures()
)
if restraints:
f += restraints[0]
g = [a + b for a, b in zip(g, restraints[1])]
c = [a + b for a, b in zip(c, restraints[2])]
# compact and reorder according to the constraints
if self._constr_manager is not None:
g = self._constr_manager.constrain_gradient_vector(g)
c = self._constr_manager.constrain_gradient_vector(c)
return f, flex.double(g), flex.double(c)
def callback_after_step(self, minimizer):
"""
Do journalling, evaluate rmsds and return True if the target is
reached to terminate the refinement.
"""
self.update_journal()
logger.debug("Step %d", self.history.get_nrows() - 1)
if self.test_for_termination():
self.history.reason_for_termination = TARGET_ACHIEVED
return True
if self.test_rmsd_convergence():
self.history.reason_for_termination = RMSD_CONVERGED
return True
return False
def run_lbfgs(self, curvatures=False):
"""
Run the minimiser, keeping track of its log.
"""
ref_log = self._log_string()
if curvatures:
self.diag_mode = "always"
self.minimizer = lbfgs.run(
target_evaluator=self,
termination_params=self._termination_params,
log=ref_log,
)
log = ref_log.getvalue()
if self._log:
with open(self._log, "a") as f:
f.write(log)
ref_log.close()
pos = log.rfind("lbfgs minimizer stop: ")
if pos >= 0:
msg = log[pos:].splitlines()[0]
if self.history.reason_for_termination:
self.history.reason_for_termination += "\n"
self.history.reason_for_termination += msg
else:
self.history.reason_for_termination = msg
if self.minimizer.error:
self.history.reason_for_termination = self.minimizer.error
class SimpleLBFGS(AdaptLbfgs):
"""Refinery implementation, using cctbx LBFGS with basic settings"""
def run(self):
return self.run_lbfgs(curvatures=False)
class LBFGScurvs(AdaptLbfgs):
"""Refinery implementation using cctbx LBFGS with curvatures"""
def run(self):
return self.run_lbfgs(curvatures=True)
def compute_functional_gradients_diag(self):
L, dL_dp, curvs = self.compute_functional_gradients_and_curvatures()
self._f = L
self._g = dL_dp
# Curvatures of zero will cause a crash, because their inverse is taken.
assert curvs.all_gt(0.0)
diags = 1.0 / curvs
msg = " curv: " + "%.5f " * len(tuple(curvs))
logger.debug(msg, *curvs)
return self._f, self._g, diags
class AdaptLstbx(Refinery, normal_eqns.non_linear_ls, normal_eqns.non_linear_ls_mixin):
"""Adapt Refinery for lstbx"""
def __init__(
self,
target,
prediction_parameterisation,
constraints_manager=None,
log=None,
tracking=None,
max_iterations=None,
):
Refinery.__init__(
self,
target,
prediction_parameterisation,
constraints_manager,
log=log,
tracking=tracking,
max_iterations=max_iterations,
)
# required for restart to work (do I need that method?)
self.x_0 = self.x.deep_copy()
# keep attribute for the Cholesky factor required for ESD calculation
self.cf = None
normal_eqns.non_linear_ls.__init__(self, n_parameters=len(self.x))
def restart(self):
self.x = self.x_0.deep_copy()
self.old_x = None
def parameter_vector_norm(self):
return self.x.norm()
def build_up(self, objective_only=False):
# code here to calculate the residuals. Rely on the target class
# for this
# I need to use the weights. They are the variances of the
# observations... See http://en.wikipedia.org/wiki/Non-linear_least_squares
# at 'diagonal weight matrix'
# set current parameter values
self.prepare_for_step()
# Reset the state to construction time, i.e. no equations accumulated
self.reset()
# observation terms
if objective_only:
residuals, weights = self._target.compute_residuals()
self.add_residuals(residuals, weights)
else:
blocks = self._target.split_matches_into_blocks(nproc=self._nproc)
if self._nproc > 1:
# ensure the jacobian is not tracked
self._jacobian = None
# processing functions
def task_wrapper(block):
(
residuals,
jacobian,
weights,
) = self._target.compute_residuals_and_gradients(block)
return {
"residuals": residuals,
"jacobian": jacobian,
"weights": weights,
}
def callback_wrapper(result):
j = result["jacobian"]
if self._constr_manager is not None:
j = self._constr_manager.constrain_jacobian(j)
self.add_equations(result["residuals"], j, result["weights"])
# no longer need the result
result["residuals"] = None
result["jacobian"] = None
result["weights"] = None
return
easy_mp.parallel_map(
func=task_wrapper,
iterable=blocks,
processes=self._nproc,
callback=callback_wrapper,
method="multiprocessing",
preserve_exception_message=True,
)
else:
for block in blocks:
(
residuals,
self._jacobian,
weights,
) = self._target.compute_residuals_and_gradients(block)
j = self._jacobian
if self._constr_manager is not None:
j = self._constr_manager.constrain_jacobian(j)
self.add_equations(residuals, j, weights)
# restraints terms
restraints = self._target.compute_restraints_residuals_and_gradients()
if restraints:
if objective_only:
self.add_residuals(restraints[0], restraints[2])
else:
j = restraints[1]
if self._constr_manager is not None:
j = self._constr_manager.constrain_jacobian(j)
self.add_equations(restraints[0], j, restraints[2])
def step_forward(self):
self.old_x = self.x.deep_copy()
self.x += self.step()
def step_backward(self):
if self.old_x is None:
return False
else:
self.x, self.old_x = self.old_x, None
return True
def set_cholesky_factor(self):
"""Set the Cholesky factor required for ESD calculation. This method is
valid only for the LSTBX dense matrix interface"""
self.cf = self.step_equations().cholesky_factor_packed_u().deep_copy()
def calculate_esds(self):
"""Calculate ESDs of parameters"""
# it is possible to get here with zero steps taken by the minimiser. For
# example by failing for the MAX_TRIAL_ITERATIONS reason before any forward
# steps are taken with the LevMar engine. If so the below is invalid,
# so return early
if self.history.get_nrows() == 0:
return None
if self.cf is None:
return None
# if constraints were used then the normal matrix has fewer rows/columns
# than the number of expanded parameters. At the moment, do not support
# this calculation when constraints were used
if self._constr_manager is not None:
return None
# invert normal matrix from N^-1 = (U^-1)(U^-1)^T
cf_inv = self.cf.matrix_packed_u_as_upper_triangle().matrix_inversion()
nm_inv = cf_inv.matrix_multiply_transpose(cf_inv)
# keep the estimated parameter variance-covariance matrix
self.parameter_var_cov = self.history["reduced_chi_squared"][-1] * nm_inv
# send this back to the models to calculate their uncertainties
self._parameters.calculate_model_state_uncertainties(self.parameter_var_cov)
# send parameter variances back to the parameter classes
# themselves, for reporting purposes and for building restraints
# based on existing parameterisations.
s2 = self.parameter_var_cov.matrix_diagonal()
assert s2.all_ge(0.0)
s = flex.sqrt(s2)
self._parameters.set_param_esds(s)
def _print_normal_matrix(self):
"""Print the full normal matrix at the current step. For debugging only"""
logger.debug("The normal matrix for the current step is:")
logger.debug(
self.normal_matrix_packed_u()
.matrix_packed_u_as_symmetric()
.as_scitbx_matrix()
.matlab_form(format=None, one_row_per_line=True)
)
logger.debug("\n")
class GaussNewtonIterations(AdaptLstbx, normal_eqns_solving.iterations):
"""Refinery implementation, using lstbx Gauss Newton iterations"""
# defaults that may be overridden
gradient_threshold = 1.0e-10
step_threshold = None
damping_value = 0.0007
max_shift_over_esd = 15
convergence_as_shift_over_esd = 1e-5
def __init__(
self,
target,
prediction_parameterisation,
constraints_manager=None,
log=None,
tracking=None,
max_iterations=20,
**kwds,
):
AdaptLstbx.__init__(
self,
target,
prediction_parameterisation,
constraints_manager,
log=log,
tracking=tracking,
max_iterations=max_iterations,
)
# add an attribute to the journal
self.history.add_column("reduced_chi_squared") # flex.double()
# adopt any overrides of the defaults above
libtbx.adopt_optional_init_args(self, kwds)
def run(self):
self.n_iterations = 0
# prepare for first step
self.build_up()
# return early if refinement is not possible
if self.dof < 1:
self.history.reason_for_termination = DOF_TOO_LOW
return
while True:
# set functional and gradients for the step (to add to the history)
self._f = self.objective()
self._g = -self.opposite_of_gradient()
# cache some items for the journal prior to solve
pvn = self.parameter_vector_norm()
gn = self.opposite_of_gradient().norm_inf()
# solve the normal equations
self.solve()
# standard journalling
self.update_journal()
logger.debug("Step %d", self.history.get_nrows() - 1)
# add cached items to the journal
self.history.set_last_cell("parameter_vector_norm", pvn)
self.history.set_last_cell("gradient_norm", gn)
# extra journalling post solve
if "solution" in self.history:
self.history.set_last_cell("solution", self.actual.step().deep_copy())
self.history.set_last_cell("solution_norm", self.step().norm())
self.history.set_last_cell("reduced_chi_squared", self.chi_sq())
# test termination criteria
if self.test_for_termination():
self.history.reason_for_termination = TARGET_ACHIEVED
break
if self.test_rmsd_convergence():
self.history.reason_for_termination = RMSD_CONVERGED
break
if self.had_too_small_a_step():
self.history.reason_for_termination = STEP_TOO_SMALL
break
if self.test_objective_increasing_but_not_nref():
self.history.reason_for_termination = OBJECTIVE_INCREASE
if self.step_backward():
self.history.reason_for_termination += (
". Parameters set back one step"
)
self.prepare_for_step()
break
if self.n_iterations == self._max_iterations:
self.history.reason_for_termination = MAX_ITERATIONS
break
# prepare for next step
self.step_forward()
self.n_iterations += 1
self.build_up()
self.set_cholesky_factor()
self.calculate_esds()
class LevenbergMarquardtIterations(GaussNewtonIterations):
"""Refinery implementation, employing lstbx Levenberg Marquadt
iterations"""
tau = 1e-3
@property
def mu(self):
return self._mu
@mu.setter
def mu(self, value):
self._mu = value
def setup_mu(self):
"""Setup initial value for mu"""
a = self.normal_matrix_packed_u()
self.mu = self.tau * flex.max(a.matrix_packed_u_diagonal())
def add_constant_to_diagonal(self, mu):
"""Add the constant value mu to the diagonal of the normal matrix"""
a = self.normal_matrix_packed_u()
        a.matrix_packed_u_diagonal_add_in_place(mu)
def report_progress(self, objective):
"""Callback within the refinement main loop that can be overridden to
report the value of the objective function (and possibly) other details for
long-running methods"""
pass
def _run_core(self):
# add an attribute to the journal
self.history.add_column("mu")
self.history.add_column("nu")
# set max iterations if not already.
if self._max_iterations is None:
self._max_iterations = 100
self.n_iterations = 0
nu = 2
self.build_up()
# early test for linear independence, require all right hand side elements to be non-zero
RHS = self.step_equations().right_hand_side()
if RHS.count(0.0) > 0:
p_names = [
b for a, b in zip(RHS, self._parameters.get_param_names()) if a == 0.0
]
raise DialsRefineRuntimeError(
f"The normal equations have an indeterminate solution. The problematic parameters are {', '.join(p_names)}."
)
# return early if refinement is not possible
if self.dof < 1:
self.history.reason_for_termination = DOF_TOO_LOW
return
self.setup_mu()
while True:
# set functional and gradients for the step
self._f = self.objective()
self._g = -self.opposite_of_gradient()
# cache some items for the journal prior to solve
pvn = self.parameter_vector_norm()
gn = self.opposite_of_gradient().norm_inf()
self.add_constant_to_diagonal(self.mu)
# solve the normal equations
self.solve()
# keep the cholesky factor for ESD calculation if we end this step. Doing
# it here ensures the normal equations are solved (cholesky_factor_packed_u
# can only be called if that is the case)
self.set_cholesky_factor()
# standard journalling
self.update_journal()
logger.debug("Step %d", self.history.get_nrows() - 1)
# add cached items to the journal
self.history.set_last_cell("parameter_vector_norm", pvn)
self.history.set_last_cell("gradient_norm", gn)
# extra journalling post solve
self.history.set_last_cell("mu", self.mu)
self.history.set_last_cell("nu", nu)
if "solution" in self.history:
self.history.set_last_cell("solution", self.actual.step().deep_copy())
self.history.set_last_cell("solution_norm", self.step().norm())
self.history.set_last_cell("reduced_chi_squared", self.chi_sq())
# test termination criteria before taking the next forward step
if self.had_too_small_a_step():
self.history.reason_for_termination = STEP_TOO_SMALL
break
if self.test_for_termination():
self.history.reason_for_termination = TARGET_ACHIEVED
break
if self.test_rmsd_convergence():
self.history.reason_for_termination = RMSD_CONVERGED
break
if self.n_iterations == self._max_iterations:
self.history.reason_for_termination = MAX_ITERATIONS
break
h = self.step()
expected_decrease = 0.5 * h.dot(self.mu * h - self._g)
self.step_forward()
self.n_iterations += 1
self.build_up(objective_only=True)
objective_new = self.objective()
self.report_progress(objective_new)
actual_decrease = self._f - objective_new
rho = actual_decrease / expected_decrease
if rho > 0:
self.mu *= max(1 / 3, 1 - (2 * rho - 1) ** 3)
nu = 2
else:
self.step_backward()
self.history.del_last_row()
if nu >= 8192:
self.history.reason_for_termination = MAX_TRIAL_ITERATIONS
break
self.mu *= nu
nu *= 2
# prepare for next step
self.build_up()
self.calculate_esds()
def run(self):
self._run_core()
self.calculate_esds()
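def _example_engine_from_phil(params, target, pred_param, constraints_manager=None):
    """A minimal sketch (hypothetical helper, not part of the original module)
    of how the refinery.engine phil choice could be mapped onto the classes
    defined above. params is assumed to be an extracted phil object containing
    the refinery scope; SparseLevMar is offered by the phil string but is
    assumed to be provided elsewhere, so it is omitted here."""
    engines = {
        "SimpleLBFGS": SimpleLBFGS,
        "LBFGScurvs": LBFGScurvs,
        "GaussNewton": GaussNewtonIterations,
        "LevMar": LevenbergMarquardtIterations,
    }
    engine_class = engines[params.refinery.engine]
    return engine_class(
        target,
        pred_param,
        constraints_manager=constraints_manager,
        log=params.refinery.log,
        tracking=params.refinery.journal,
        max_iterations=params.refinery.max_iterations,
    )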
| dials/dials | algorithms/refinement/engine.py | Python | bsd-3-clause | 37,272 |
from django.views.generic import TemplateView
from main.mixins import MenuContextMixin
from project.helpers.cache_control_view import CacheMixin
class MockupView(MenuContextMixin, CacheMixin, TemplateView):
pass
class HomepageView(MenuContextMixin, CacheMixin, TemplateView):
template_name = 'index.html'
cache_max_age = 60
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
| makeev/django-boilerplate | back/main/views/pages.py | Python | mit | 461 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for Tango on Android plugins."""
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import tango_android
from tests.parsers.sqlite_plugins import test_lib
class TangoAndroidProfileTest(test_lib.SQLitePluginTestCase):
"""Tests for Tango on Android profile database plugin."""
def testProcess(self):
"""Test the Process function on a Tango Android file."""
plugin = tango_android.TangoAndroidProfilePlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['tango_android_profile.db'], plugin)
# We should have 115 events in total with no warnings.
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 115)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
# Test a contact last active event.
expected_event_values = {
'birthday': '1980-10-01',
'data_type': 'tango:android:contact',
'date_time': '2016-01-15 13:21:45.624',
'distance': 39.04880905,
'first_name': 'Rouel',
'friend_request_message': 'I am following you on Tango',
'friend_request_type': 'outRequest',
'gender': 'male',
'is_friend': False,
'last_name': 'Henry',
'status': 'Praying!',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACTIVE}
self.CheckEventValues(storage_writer, events[14], expected_event_values)
# Test a contact last access event.
expected_event_values = {
'data_type': 'tango:android:contact',
'date_time': '2016-01-15 14:35:20.633',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}
self.CheckEventValues(storage_writer, events[57], expected_event_values)
# Test a contact request sent event.
expected_event_values = {
'data_type': 'tango:android:contact',
'date_time': '2016-01-15 14:35:20.436',
'timestamp_desc': definitions.TIME_DESCRIPTION_SENT}
self.CheckEventValues(storage_writer, events[56], expected_event_values)
class TangoAndroidTCTest(test_lib.SQLitePluginTestCase):
"""Tests for Tango on Android tc databases plugin."""
def testProcess(self):
"""Test the Process function on a Tango Android file."""
plugin = tango_android.TangoAndroidTCPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['tango_android_tc.db'], plugin)
# We should have 43 events in total with no warnings.
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 43)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetSortedEvents())
# Test the a conversation event.
expected_event_values = {
'conversation_identifier': 'DyGWr_010wQM_ozkIe-9Ww',
'data_type': 'tango:android:conversation',
'date_time': 'Not set',
'timestamp_desc': definitions.TIME_DESCRIPTION_NOT_A_TIME}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
# Test a message creation event.
expected_event_values = {
'data_type': 'tango:android:message',
'date_time': '2016-01-15 14:41:33.027',
'direction': 2,
'message_identifier': 16777224,
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION}
self.CheckEventValues(storage_writer, events[21], expected_event_values)
# Test a message sent event.
expected_event_values = {
'data_type': 'tango:android:message',
'date_time': '2016-01-15 14:41:34.238',
'direction': 2,
'message_identifier': 16777224,
'timestamp_desc': definitions.TIME_DESCRIPTION_SENT}
self.CheckEventValues(storage_writer, events[22], expected_event_values)
if __name__ == '__main__':
unittest.main()
| log2timeline/plaso | tests/parsers/sqlite_plugins/tango_android.py | Python | apache-2.0 | 4,398 |
# -*- coding: utf-8 -*-
"""
Unit testing module for pytest-pylint plugin
"""
import os
import re
from textwrap import dedent
from unittest import mock
import pylint.config
import pytest
pytest_plugins = ("pytester",) # pylint: disable=invalid-name
def test_basic(testdir):
"""Verify basic pylint checks"""
testdir.makepyfile("import sys")
result = testdir.runpytest("--pylint")
assert "Missing module docstring" in result.stdout.str()
assert "Unused import sys" in result.stdout.str()
assert "Final newline missing" in result.stdout.str()
assert "passed, " not in result.stdout.str()
assert "1 failed" in result.stdout.str()
assert "Linting files" in result.stdout.str()
def test_nodeid(testdir):
"""Verify our nodeid adds a suffix"""
testdir.makepyfile(app="import sys")
result = testdir.runpytest("--pylint", "--collectonly", "--verbose")
for expected in "<PylintFile app.py>", "<PyLintItem PYLINT>":
assert expected in result.stdout.str()
def test_nodeid_no_dupepath(testdir):
"""Verify we don't duplicate the node path in our node id."""
testdir.makepyfile(app="import sys")
result = testdir.runpytest("--pylint", "--verbose")
assert re.search(
r"^FAILED\s+app\.py::PYLINT$", result.stdout.str(), flags=re.MULTILINE
)
def test_subdirectories(testdir):
"""Verify pylint checks files in subdirectories"""
subdir = testdir.mkpydir("mymodule")
testfile = subdir.join("test_file.py")
testfile.write("import sys")
result = testdir.runpytest("--pylint")
assert "[pylint] mymodule/test_file.py" in result.stdout.str()
assert "Missing module docstring" in result.stdout.str()
assert "Unused import sys" in result.stdout.str()
assert "Final newline missing" in result.stdout.str()
assert "1 failed" in result.stdout.str()
assert "Linting files" in result.stdout.str()
def test_disable(testdir):
"""Verify basic pylint checks"""
testdir.makepyfile("import sys")
result = testdir.runpytest("--pylint --no-pylint")
assert "Final newline missing" not in result.stdout.str()
assert "Linting files" not in result.stdout.str()
def test_error_control(testdir):
"""Verify that error types are configurable"""
testdir.makepyfile("import sys")
result = testdir.runpytest("--pylint", "--pylint-error-types=EF")
assert "1 passed" in result.stdout.str()
def test_pylintrc_file(testdir):
"""Verify that a specified pylint rc file will work."""
rcfile = testdir.makefile(
".rc",
"""
[FORMAT]
max-line-length=3
""",
)
testdir.makepyfile("import sys")
result = testdir.runpytest("--pylint", "--pylint-rcfile={0}".format(rcfile.strpath))
assert "Line too long (10/3)" in result.stdout.str()
def test_pylintrc_file_toml(testdir):
"""Verify that pyproject.toml can be used as a pylint rc file."""
rcfile = testdir.makefile(
".toml",
pylint="""
[tool.pylint.FORMAT]
max-line-length = "3"
""",
)
testdir.makepyfile("import sys")
result = testdir.runpytest("--pylint", "--pylint-rcfile={0}".format(rcfile.strpath))
    # Parsing changed from integer to string in pylint >=2.5. Once support
    # for pylint <2.5 is dropped, this can be removed.
if "should be of type int" in result.stdout.str():
rcfile = testdir.makefile(
".toml",
pylint="""
[tool.pylint.FORMAT]
max-line-length = 3
""",
)
result = testdir.runpytest(
"--pylint", "--pylint-rcfile={0}".format(rcfile.strpath)
)
assert "Line too long (10/3)" in result.stdout.str()
def test_pylintrc_file_pyproject_toml(testdir):
"""Verify that pyproject.toml can be auto-detected as a pylint rc file."""
# pylint only auto-detects pyproject.toml from 2.5 onwards
if not hasattr(pylint.config, "find_default_config_files"):
return
testdir.makefile(
".toml",
pyproject="""
[tool.pylint.FORMAT]
max-line-length = "3"
""",
)
testdir.makepyfile("import sys")
result = testdir.runpytest("--pylint")
assert "Line too long (10/3)" in result.stdout.str()
def test_pylintrc_file_beside_ini(testdir):
"""
Verify that a specified pylint rc file will work what placed into pytest
ini dir.
"""
non_cwd_dir = testdir.mkdir("non_cwd_dir")
rcfile = non_cwd_dir.join("foo.rc")
rcfile.write(
"""
[FORMAT]
max-line-length=3
"""
)
inifile = non_cwd_dir.join("foo.ini")
inifile.write(
dedent(
"""
[pytest]
addopts = --pylint --pylint-rcfile={0}
""".format(
rcfile.basename
)
)
)
pyfile = testdir.makepyfile("import sys")
result = testdir.runpytest(pyfile.strpath)
assert "Line too long (10/3)" not in result.stdout.str()
result = testdir.runpytest("-c", inifile.strpath, pyfile.strpath)
assert "Line too long (10/3)" in result.stdout.str()
@pytest.mark.parametrize("rcformat", ("ini", "toml", "simple_toml"))
def test_pylintrc_ignore(testdir, rcformat):
"""Verify that a pylintrc file with ignores will work."""
if rcformat == "toml":
rcfile = testdir.makefile(
".toml",
"""
[tool.pylint.master]
ignore = ["test_pylintrc_ignore.py", "foo.py"]
""",
)
elif rcformat == "simple_toml":
rcfile = testdir.makefile(
".toml",
"""
[tool.pylint.MASTER]
ignore = "test_pylintrc_ignore.py,foo.py"
""",
)
else:
rcfile = testdir.makefile(
".rc",
"""
[MASTER]
ignore = test_pylintrc_ignore.py
""",
)
testdir.makepyfile("import sys")
result = testdir.runpytest("--pylint", "--pylint-rcfile={0}".format(rcfile.strpath))
assert "collected 0 items" in result.stdout.str()
@pytest.mark.parametrize("rcformat", ("ini", "toml"))
def test_pylintrc_msg_template(testdir, rcformat):
"""Verify that msg-template from pylintrc file is handled."""
if rcformat == "toml":
rcfile = testdir.makefile(
".toml",
"""
[tool.pylint.REPORTS]
msg-template = "start {msg_id} end"
""",
)
else:
rcfile = testdir.makefile(
".rc",
"""
[REPORTS]
msg-template=start {msg_id} end
""",
)
testdir.makepyfile("import sys")
result = testdir.runpytest("--pylint", "--pylint-rcfile={0}".format(rcfile.strpath))
assert "start W0611 end" in result.stdout.str()
def test_multiple_jobs(testdir):
"""
Assert that the jobs argument is passed through to pylint if provided
"""
testdir.makepyfile("import sys")
with mock.patch("pytest_pylint.plugin.lint.Run") as run_mock:
jobs = 0
testdir.runpytest("--pylint", "--pylint-jobs={0}".format(jobs))
assert run_mock.call_count == 1
assert run_mock.call_args[0][0][-2:] == ["-j", str(jobs)]
def test_no_multiple_jobs(testdir):
"""
If no jobs argument is specified it should not appear in pylint arguments
"""
testdir.makepyfile("import sys")
with mock.patch("pytest_pylint.plugin.lint.Run") as run_mock:
testdir.runpytest("--pylint")
assert run_mock.call_count == 1
assert "-j" not in run_mock.call_args[0][0]
def test_skip_checked_files(testdir):
"""
Test a file twice which can pass pylint.
The 2nd time should be skipped.
"""
testdir.makepyfile(
"#!/usr/bin/env python",
'"""A hello world script."""',
"",
"from __future__ import print_function",
"",
'print("Hello world!") # pylint: disable=missing-final-newline',
)
# The 1st time should be passed
result = testdir.runpytest("--pylint")
assert "1 passed" in result.stdout.str()
# The 2nd time should be skipped
result = testdir.runpytest("--pylint")
assert "1 skipped" in result.stdout.str()
# Always be passed when cacheprovider disabled
result = testdir.runpytest("--pylint", "-p", "no:cacheprovider")
assert "1 passed" in result.stdout.str()
def test_invalidate_cache_when_config_changes(testdir):
"""If pylintrc changes, no cache should apply."""
rcfile = testdir.makefile(
".rc", "[MESSAGES CONTROL]", "disable=missing-final-newline"
)
testdir.makepyfile('"""hi."""')
result = testdir.runpytest("--pylint", "--pylint-rcfile={0}".format(rcfile.strpath))
assert "1 passed" in result.stdout.str()
result = testdir.runpytest("--pylint", "--pylint-rcfile={0}".format(rcfile.strpath))
assert "1 skipped" in result.stdout.str()
# Change RC file entirely
alt_rcfile = testdir.makefile(
".rc", alt="[MESSAGES CONTROL]\ndisable=unbalanced-tuple-unpacking"
)
result = testdir.runpytest(
"--pylint", "--pylint-rcfile={0}".format(alt_rcfile.strpath)
)
assert "1 failed" in result.stdout.str()
# Change contents of RC file
result = testdir.runpytest("--pylint", "--pylint-rcfile={0}".format(rcfile.strpath))
assert "1 passed" in result.stdout.str()
with open(rcfile, "w"):
pass
result = testdir.runpytest("--pylint", "--pylint-rcfile={0}".format(rcfile.strpath))
assert "1 failed" in result.stdout.str()
def test_output_file(testdir):
"""Verify pylint report output"""
testdir.makepyfile("import sys")
testdir.runpytest("--pylint", "--pylint-output-file=pylint.report")
output_file = os.path.join(testdir.tmpdir.strpath, "pylint.report")
assert os.path.isfile(output_file)
with open(output_file, "r") as _file:
report = _file.read()
assert (
"test_output_file.py:1: [C0304(missing-final-newline), ] Final "
"newline missing"
) in report
assert (
"test_output_file.py:1: [C0111(missing-docstring), ] Missing "
"module docstring"
) in report or (
"test_output_file.py:1: [C0114(missing-module-docstring), ] Missing "
"module docstring"
) in report
assert (
"test_output_file.py:1: [W0611(unused-import), ] Unused import sys"
) in report
def test_output_file_makes_dirs(testdir):
"""Verify output works with folders properly."""
testdir.makepyfile("import sys")
output_path = os.path.join("reports", "pylint.report")
testdir.runpytest("--pylint", "--pylint-output-file={}".format(output_path))
output_file = os.path.join(testdir.tmpdir.strpath, output_path)
assert os.path.isfile(output_file)
# Run again to make sure we don't crash trying to make a dir that exists
testdir.runpytest("--pylint", "--pylint-output-file={}".format(output_path))
@pytest.mark.parametrize(
"arg_opt_name, arg_opt_value",
[("ignore", "test_cmd_line_ignore.py"), ("ignore-patterns", ".+_ignore.py")],
ids=["ignore", "ignore-patterns"],
)
def test_cmd_line_ignore(testdir, arg_opt_name, arg_opt_value):
"""Verify that cmd line args ignores will work."""
testdir.makepyfile(test_cmd_line_ignore="import sys")
result = testdir.runpytest(
"--pylint", "--pylint-{0}={1}".format(arg_opt_name, arg_opt_value)
)
assert "collected 0 items" in result.stdout.str()
assert "Unused import sys" not in result.stdout.str()
@pytest.mark.parametrize(
"arg_opt_name, arg_opt_value",
[("ignore", "test_cmd_line_ignore_pri_arg.py"), ("ignore-patterns", ".*arg.py$")],
ids=["ignore", "ignore-patterns"],
)
def test_cmd_line_ignore_pri(testdir, arg_opt_name, arg_opt_value):
"""
Verify that command line ignores and patterns take priority over
rcfile ignores.
"""
file_ignore = "test_cmd_line_ignore_pri_file.py"
cmd_arg_ignore = "test_cmd_line_ignore_pri_arg.py"
cmd_line_ignore = arg_opt_value
rcfile = testdir.makefile(
".rc",
"""
[MASTER]
{0} = {1},foo
""".format(
arg_opt_name, file_ignore
),
)
testdir.makepyfile(**{file_ignore: "import sys", cmd_arg_ignore: "import os"})
result = testdir.runpytest(
"--pylint",
"--pylint-rcfile={0}".format(rcfile.strpath),
"--pylint-{0}={1}".format(arg_opt_name, cmd_line_ignore),
"-s",
)
assert "collected 1 item" in result.stdout.str()
assert "Unused import sys" in result.stdout.str()
| carsongee/pytest-pylint | pytest_pylint/tests/test_pytest_pylint.py | Python | mit | 12,608 |
# Copyright 2016 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from threading import Thread
from lcm.ns.sfcs.create_flowcla import CreateFlowClassifier
from lcm.ns.sfcs.create_port_chain import CreatePortChain
from lcm.ns.sfcs.create_portpairgp import CreatePortPairGroup
from lcm.ns.sfcs.sfc_instance import SfcInstance
from lcm.ns.sfcs.utils import update_fp_status
from lcm.pub.exceptions import NSLCMException
from lcm.pub.utils.jobutil import JobUtil
logger = logging.getLogger(__name__)
class CreateSfcWorker(Thread):
def __init__(self, data):
super(CreateSfcWorker, self).__init__()
self.ns_inst_id = data["nsinstid"]
self.ns_model_data = data["ns_model_data"]
self.fp_id = data["fpindex"]
self.sdnControllerId = data["sdncontrollerid"]
self.fp_inst_id = data["fpinstid"]
self.data = data
self.job_id = ""
def init_data(self):
self.job_id = JobUtil.create_job("SFC", "sfc_init", self.ns_inst_id + "_" + self.fp_id)
return self.job_id
def run(self):
try:
logger.info("Service Function Chain Worker start : ")
CreateFlowClassifier(self.data).do_biz()
JobUtil.add_job_status(self.job_id, 50, "create flow classifer successfully!", "")
CreatePortPairGroup(self.data).do_biz()
JobUtil.add_job_status(self.job_id, 75, "create port pair group successfully!", "")
CreatePortChain(self.data).do_biz()
update_fp_status(self.fp_inst_id, "active")
JobUtil.add_job_status(self.job_id, 100, "create port chain successful!", "")
logger.info("Service Function Chain Worker end : ")
except NSLCMException as e:
self.handle_exception(e)
except Exception as e:
self.handle_exception(e)
def handle_exception(self, e):
detail = "sfc instantiation failed, detail message: %s" % e.message
JobUtil.add_job_status(self.job_id, 255, "create sfc failed!", "")
logger.error(traceback.format_exc())
logger.error(detail)
update_fp_status(self.fp_inst_id, "failed")
| open-o/nfvo | lcm/lcm/ns/sfcs/create_sfc_worker.py | Python | apache-2.0 | 2,698 |
# coding=utf-8
import re
# [oov] no longer in words.txt
OOV_TERM = '<unk>'
def load_vocabulary(words_file):
'''Load vocabulary words from an OpenFST SymbolTable formatted text file'''
return set(x.split(' ')[0] for x in words_file if x != '')
def kaldi_normalize(word, vocab):
"""
Take a token extracted from a transcript by MetaSentence and
transform it to use the same format as Kaldi's vocabulary files.
Removes fancy punctuation and strips out-of-vocabulary words.
"""
# lowercase
norm = word.lower()
# Turn fancy apostrophes into simpler apostrophes
norm = norm.replace("’", "'")
if len(norm) > 0 and not norm in vocab:
norm = OOV_TERM
return norm
class MetaSentence:
"""Maintain two parallel representations of a sentence: one for
Kaldi's benefit, and the other in human-legible form.
"""
def __init__(self, sentence, vocab):
self.raw_sentence = sentence
if type(sentence) == bytes:
self.raw_sentence = sentence.decode('utf-8')
self.vocab = vocab
self._tokenize()
def _tokenize(self):
self._seq = []
for m in re.finditer(r'(\w|\’\w|\'\w)+', self.raw_sentence, re.UNICODE):
start, end = m.span()
word = m.group()
token = kaldi_normalize(word, self.vocab)
self._seq.append({
"start": start, # as unicode codepoint offset
"end": end, # as unicode codepoint offset
"token": token,
})
def get_kaldi_sequence(self):
return [x["token"] for x in self._seq]
def get_display_sequence(self):
display_sequence = []
for x in self._seq:
start, end = x["start"], x["end"]
word = self.raw_sentence[start:end]
display_sequence.append(word)
return display_sequence
def get_text_offsets(self):
return [(x["start"], x["end"]) for x in self._seq]
| lowerquality/gentle | gentle/metasentence.py | Python | mit | 1,983 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yapf.reformatter."""
import textwrap
import unittest
from yapf.yapflib import py3compat
from yapf.yapflib import reformatter
from yapf.yapflib import style
from yapf.yapflib import verifier
from yapftests import yapf_test_helper
@unittest.skipIf(py3compat.PY3, 'Requires Python 2')
class TestVerifyNoVerify(yapf_test_helper.YAPFTest):
@classmethod
def setUpClass(cls):
style.SetGlobalStyle(style.CreatePEP8Style())
def testVerifyException(self):
unformatted_code = textwrap.dedent("""\
class ABC(metaclass=type):
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
with self.assertRaises(verifier.InternalError):
reformatter.Reformat(uwlines, verify=True)
reformatter.Reformat(uwlines) # verify should be False by default.
def testNoVerify(self):
unformatted_code = textwrap.dedent("""\
class ABC(metaclass=type):
pass
""")
expected_formatted_code = textwrap.dedent("""\
class ABC(metaclass=type):
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code,
reformatter.Reformat(uwlines, verify=False))
def testVerifyFutureImport(self):
unformatted_code = textwrap.dedent("""\
from __future__ import print_function
def call_my_function(the_function):
the_function("hi")
if __name__ == "__main__":
call_my_function(print)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
with self.assertRaises(verifier.InternalError):
reformatter.Reformat(uwlines, verify=True)
expected_formatted_code = textwrap.dedent("""\
from __future__ import print_function
def call_my_function(the_function):
the_function("hi")
if __name__ == "__main__":
call_my_function(print)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code,
reformatter.Reformat(uwlines, verify=False))
def testContinuationLineShouldBeDistinguished(self):
unformatted_code = textwrap.dedent("""\
class Foo(object):
def bar(self):
if self.solo_generator_that_is_long is None and len(
self.generators + self.next_batch) == 1:
pass
""")
expected_formatted_code = textwrap.dedent("""\
class Foo(object):
def bar(self):
if self.solo_generator_that_is_long is None and len(
self.generators + self.next_batch) == 1:
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code,
reformatter.Reformat(uwlines, verify=False))
if __name__ == '__main__':
unittest.main()
| sbc100/yapf | yapftests/reformatter_verify_test.py | Python | apache-2.0 | 3,579 |
__author__ = 'adeksandrcernov'
import mysql.connector
from model.contact import Contact
from model.group import Group
class DbFixture:
def __init__(self, host, name , user, password):
self.host = host
self.name = name
self.user = user
self.password = password
self.connection = mysql.connector.connect(host = host, database = name, user = user, password = password)
self.connection.autocommit = True
def get_contacts_list(self):
list=[]
cursor = self.connection.cursor()
try:
cursor.execute("select firstname, middlename,lastname,nickname,"
"title,address,home,mobile,work,fax"
"email,email2,email3,byear,address2,phone2 from addressbook where deprecated = '0000-00-00 00:00:00'")
for row in cursor:
(firstname,middlename,lastname,nickname,title,company,address,home_phone,mobile_phone,work_phone,fax,byear,address1,phone2,email,
email2,email3) = row
list.append(Contact(firstname= firstname, middlename=middlename, lastname=lastname,nickname = nickname, title = title,
company = company,
address = address,home_phone = home_phone, mobile_phone = mobile_phone,work_phone = work_phone, fax = fax,byear = byear,
address1 = address1,phone2 = phone2,email = email,email2 = email2, email3 = email3))
finally:
cursor.close()
return list
def get_group_list(self):
list=[]
cursor = self.connection.cursor()
try:
cursor.execute("select group_id, group_name,group_header,group_footer from group_list")
for row in cursor:
(id,name,header,footer) = row
list.append(Group(id = str(id), name = name, header=header, footer = footer))
finally:
cursor.close()
return list
def destroy(self):
        self.connection.close()
| AlChernoff/python_training | fixture/Db.py | Python | apache-2.0 | 2,024 |
from django import forms
from django.core.validators import MinLengthValidator
from .models import DemoUser
class DemoUserEditForm(forms.ModelForm):
"""Form for viewing and editing name fields in a DemoUser object.
A good reference for Django forms is:
http://pydanny.com/core-concepts-django-modelforms.html
"""
def __init__(self, *args, **kwargs):
# TODO: this doesn't seem to work. Need to get to the bottom of it.
#self.base_fields["display_name"].min_length = 2
#self.base_fields["display_name"].validators.append(MinLengthValidator)
#print self.base_fields['display_name'].validators
super(forms.ModelForm, self).__init__(*args, **kwargs)
class Meta:
model = DemoUser
fields = ('first_name', 'last_name', 'username')
class DemoUserAdminForm(forms.ModelForm):
class Meta:
model = DemoUser
fields = ('email', 'first_name', 'last_name', 'username', 'is_staff', 'is_active', 'date_joined')
def is_valid(self):
#log.info(force_text(self.errors))
return super(DemoUserAdminForm, self).is_valid()
| kingofsystem/demo-allauth-bootstrap | allauthdemo/auth/forms.py | Python | mit | 1,126 |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_format23.xlsx')
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [108321024, 108328448]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
'border': {'color': 'yellow'},
'fill': {'color': 'red', 'transparency': 100},
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| jmcnamara/XlsxWriter | xlsxwriter/test/comparison/test_chart_format23.py | Python | bsd-2-clause | 1,541 |
from admin import reset
from base import Database, Runtime
from hs_common.hs_cleanup import Cleanup
from user import User,UserGroup,Group,GroupPermission,Permission
import unittest
class PermissionTest(unittest.TestCase):
def setUp(self):
Runtime.enable_trace = False
reset.reset()
Runtime.enable_trace = True
def test_get_user_permission(self):
cleanup = Cleanup()
session = Database.create_sqlalchemy_session()
cleanup.push(session.close)
User.add_user_account(session, "u0", "up0")
Group.add(session, "g0", "g0")
UserGroup.join(session, "u0", "g0")
self.assertEqual(Permission.get_user_permission(session, "u0", "p0"), False)
GroupPermission.set(session, "g0", "p0", 10, True)
self.assertEqual(Permission.get_user_permission(session, "u0", "p0"), True)
GroupPermission.set(session, "g0", "p0", 20, False)
self.assertEqual(Permission.get_user_permission(session, "u0", "p0"), False)
Group.add(session, "g1", "g1")
UserGroup.join(session, "u0", "g1")
GroupPermission.set(session, "g1", "p0", 30, True)
self.assertEqual(Permission.get_user_permission(session, "u0", "p0"), True)
self.assertEqual(Permission.get_user_permission(session, "u1", "p0"), False)
self.assertEqual(Permission.get_user_permission(session, "u0", "p1"), False)
| luzi82/HiSocial | test_old/Permission.py | Python | gpl-3.0 | 1,414 |
"""This script allows us to manually merge the results from oplog and profiling
results."""
import calendar
import config
import os
import sys
import utils
from bson import BSON
def dump_op(output, op):
copier = utils.DictionaryCopier(op)
copier.copy_fields("ts", "ns", "op")
op_type = op["op"]
# handpick some essential fields to execute.
if op_type == "query":
copier.copy_fields("query", "ntoskip", "ntoreturn")
elif op_type == "insert":
copier.copy_fields("o")
elif op_type == "update":
copier.copy_fields("updateobj", "query")
elif op_type == "remove":
copier.copy_fields("query")
elif op_type == "command":
copier.copy_fields("command")
output.write(BSON.encode(copier.dest))
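# For example (illustration only), a profiler "remove" entry such as
#   {"ts": ..., "ns": "test.users", "op": "remove", "query": {"_id": 1},
#    "millis": 0, "client": "127.0.0.1", ...}
# is reduced by dump_op to just the fields needed for replay:
#   {"ts": ..., "ns": "test.users", "op": "remove", "query": {"_id": 1}}
# before being BSON-encoded and appended to the output file.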
def merge_to_final_output(oplog_output_file, profiler_output_files, output_file):
"""
* Why merge files:
we need to merge the docs from two sources into one.
* Why not merge earlier:
It's definitely inefficient to merge the entries when we just retrieve
these documents from mongodb. However we designed this script to be able
to pull the docs from differnt servers, as a result it's hard to do the
on-time merge since you cannot determine if some "old" entries will come
later."""
oplog = open(oplog_output_file, "rb")
# create a map of profiler file names to files
profiler_files = {}
for profiler_file in profiler_output_files:
profiler_files[profiler_file] = open(profiler_file, "rb")
output = open(output_file, "wb")
logger = utils.LOG
logger.info("Starts completing the insert options")
oplog_doc = utils.unpickle(oplog)
# Create a map of tuple(doc's timestamp, profiler file name) to doc for
# each profiler. This makes it easy to fetch the earliest doc in the group
# on each iteration.
profiler_docs = {}
for file_name in profiler_files:
doc = utils.unpickle(profiler_files[file_name])
# associate doc with a tuple representing the ts and source filename
# this makes it easy to fetch the earliest doc in the group on each
# iteration
if doc:
profiler_docs[(doc["ts"], file_name)] = doc
inserts = 0
noninserts = 0
severe_inconsistencies = 0
mild_inconsistencies = 0
# read docs until either we exhaust the oplog or all ops in the profile logs
while oplog_doc and len(profiler_docs) > 0:
if (noninserts + inserts) % 2500 == 0:
logger.info("processed %d items", noninserts + inserts)
# get the earliest profile doc out of all profiler_docs
key = min(profiler_docs.keys())
profiler_doc = profiler_docs[key]
# remove the doc and fetch a new one
del(profiler_docs[key])
# the first field in the key is the file name
doc = utils.unpickle(profiler_files[key[1]])
if doc:
profiler_docs[(doc["ts"], key[1])] = doc
# If the retrieved operation is not an insert, we can simply dump it
# to the output file. Otherwise, we need to cross-reference the
# profiler's insert operation with an oplog entry (because the
# profiler doesn't contain the inserted object's details).
if profiler_doc["op"] != "insert":
dump_op(output, profiler_doc)
noninserts += 1
else:
# Compare the profile doc's ts with the oplog doc's ts. In the
# ideal scenario, every insert we capture via the profile
# collection should match a consecutive oplog entry (the oplog
# tailer only looks at insert ops).
profiler_ts = calendar.timegm(profiler_doc["ts"].timetuple())
oplog_ts = oplog_doc["ts"].time
delta = abs(profiler_ts - oplog_ts)
if delta > 3:
# TODO strictly speaking, this ain't good since the files are
                # not properly closed.
logger.error(
"oplog and profiler results are inconsistent `ts`\n"
" oplog: %d\n"
" profiler: %d", oplog_ts, profiler_ts)
severe_inconsistencies += 1
elif delta != 0:
logger.warn("Slightly inconsistent timestamp\n"
" oplog: %d\n"
" profiler %d", oplog_ts, profiler_ts)
mild_inconsistencies += 1
oplog_doc["ts"] = profiler_doc["ts"] # we still want to keep the canonical form of the ts
oplog_doc["op"] = profiler_doc["op"] # make sure "op" is "insert" instead of "i"
dump_op(output, oplog_doc)
inserts += 1
# Get the next doc from the oplog
oplog_doc = utils.unpickle(oplog)
# finish up any remaining non-insert ops
while len(profiler_docs) > 0:
# get the earliest profile doc out of all profiler_docs
key = min(profiler_docs.keys())
profiler_doc = profiler_docs[key]
# remove the doc and fetch a new one
del(profiler_docs[key])
doc = utils.unpickle(profiler_files[key[1]])
if doc:
profiler_docs[(doc["ts"], key[1])] = doc
if profiler_doc["op"] == "insert":
break
dump_op(output, profiler_doc)
noninserts += 1
logger.info("Finished completing the insert options, %d inserts and"
" %d noninserts\n"
" severe ts incosistencies: %d\n"
" mild ts incosistencies: %d\n", inserts, noninserts,
severe_inconsistencies, mild_inconsistencies)
for f in [oplog, output]:
f.close()
for f in profiler_files.values():
f.close()
# Clean up temporary files (oplog + profiler files), since everything is
# already in the main output file
for f in profiler_output_files:
os.remove(f)
os.remove(oplog_output_file)
return True
def main():
# TODO: this command is not user-friendly and doesn't do any sanity check
# for the parameters.
db_config = config.DB_CONFIG
if len(sys.argv) != 1:
params = sys.argv[1:]
merge_to_final_output(params[0], params[1], params[2])
else:
merge_to_final_output(db_config["oplog_output_file"],
db_config["profiler_output_file"],
db_config["output_file"])
if __name__ == '__main__':
main()
| ParsePlatform/flashback | record/merge.py | Python | bsd-3-clause | 6,495 |
import os
from django.conf import settings
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mygpo.settings")
celery = Celery("mygpo.celery")
celery.config_from_object("django.conf:settings", namespace="CELERY")
celery.autodiscover_tasks()
| gpodder/mygpo | mygpo/celery.py | Python | agpl-3.0 | 269 |
"""Tests for daemon working with IMAP connections."""
import logging
import os
import pathlib
import unittest
from maildaemon.config import load_config
from maildaemon.imap_cache import IMAPCache
_LOG = logging.getLogger(__name__)
_HERE = pathlib.Path(__file__).parent
_TEST_CONFIG_PATH = _HERE.joinpath('maildaemon_test_config.json')
@unittest.skipUnless(os.environ.get('TEST_COMM') or os.environ.get('CI'),
'skipping tests that require server connection')
class Tests(unittest.TestCase):
config = load_config(_TEST_CONFIG_PATH)
def test_update_folders(self):
for connection_name in ['test-imap', 'test-imap-ssl']:
with self.subTest(msg=connection_name):
c = IMAPCache.from_dict(self.config['connections'][connection_name])
c.connect()
c.update_folders()
# folder = c.folders['']
# c.delete_folder(folder) # TODO: implement IMAP folder deletion
c.update_folders()
c.disconnect()
def test_update(self):
for connection_name in ['test-imap', 'test-imap-ssl']:
with self.subTest(msg=connection_name):
# import time; time.sleep(2)
c = IMAPCache.from_dict(self.config['connections'][connection_name])
c.connect()
# c.update() # TODO: there's some cryptic error in msg id 12 in INBOX
c.disconnect()
| mbdevpl/maildaemon | test/test_imap_cache.py | Python | apache-2.0 | 1,469 |
# -*- coding: utf-8 -*-
#
# Secret Labs' Regular Expression Engine
#
# convert template to internal format
#
# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
import _sre, sys
import sre_parse
from sre_constants import *
assert _sre.MAGIC == MAGIC, "SRE module mismatch"
if _sre.CODESIZE == 2:
MAXCODE = 65535
else:
MAXCODE = 0xFFFFFFFFL
_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
_SUCCESS_CODES = set([SUCCESS, FAILURE])
_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
# Sets of lowercase characters which have the same uppercase.
_equivalences = (
# LATIN SMALL LETTER I, LATIN SMALL LETTER DOTLESS I
(0x69, 0x131), # iı
# LATIN SMALL LETTER S, LATIN SMALL LETTER LONG S
(0x73, 0x17f), # sſ
# MICRO SIGN, GREEK SMALL LETTER MU
(0xb5, 0x3bc), # µμ
# COMBINING GREEK YPOGEGRAMMENI, GREEK SMALL LETTER IOTA, GREEK PROSGEGRAMMENI
(0x345, 0x3b9, 0x1fbe), # \u0345ιι
# GREEK SMALL LETTER BETA, GREEK BETA SYMBOL
(0x3b2, 0x3d0), # βϐ
# GREEK SMALL LETTER EPSILON, GREEK LUNATE EPSILON SYMBOL
(0x3b5, 0x3f5), # εϵ
# GREEK SMALL LETTER THETA, GREEK THETA SYMBOL
(0x3b8, 0x3d1), # θϑ
# GREEK SMALL LETTER KAPPA, GREEK KAPPA SYMBOL
(0x3ba, 0x3f0), # κϰ
# GREEK SMALL LETTER PI, GREEK PI SYMBOL
(0x3c0, 0x3d6), # πϖ
# GREEK SMALL LETTER RHO, GREEK RHO SYMBOL
(0x3c1, 0x3f1), # ρϱ
# GREEK SMALL LETTER FINAL SIGMA, GREEK SMALL LETTER SIGMA
(0x3c2, 0x3c3), # ςσ
# GREEK SMALL LETTER PHI, GREEK PHI SYMBOL
(0x3c6, 0x3d5), # φϕ
# LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE
(0x1e61, 0x1e9b), # ṡẛ
)
# Maps the lowercase code to lowercase codes which have the same uppercase.
_ignorecase_fixes = {i: tuple(j for j in t if i != j)
for t in _equivalences for i in t}
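# For example, 0x69 ('i') maps to (0x131,) and 0x131 (dotless i) maps to
# (0x69,), since both lowercase forms share the uppercase 'I'.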
def _compile(code, pattern, flags):
# internal: compile a (sub)pattern
emit = code.append
_len = len
LITERAL_CODES = _LITERAL_CODES
REPEATING_CODES = _REPEATING_CODES
SUCCESS_CODES = _SUCCESS_CODES
ASSERT_CODES = _ASSERT_CODES
if (flags & SRE_FLAG_IGNORECASE and
not (flags & SRE_FLAG_LOCALE) and
flags & SRE_FLAG_UNICODE):
fixes = _ignorecase_fixes
else:
fixes = None
for op, av in pattern:
if op in LITERAL_CODES:
if flags & SRE_FLAG_IGNORECASE:
lo = _sre.getlower(av, flags)
if fixes and lo in fixes:
emit(OPCODES[IN_IGNORE])
skip = _len(code); emit(0)
if op is NOT_LITERAL:
emit(OPCODES[NEGATE])
for k in (lo,) + fixes[lo]:
emit(OPCODES[LITERAL])
emit(k)
emit(OPCODES[FAILURE])
code[skip] = _len(code) - skip
else:
emit(OPCODES[OP_IGNORE[op]])
emit(lo)
else:
emit(OPCODES[op])
emit(av)
elif op is IN:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
def fixup(literal, flags=flags):
return _sre.getlower(literal, flags)
else:
emit(OPCODES[op])
fixup = None
skip = _len(code); emit(0)
_compile_charset(av, flags, code, fixup, fixes)
code[skip] = _len(code) - skip
elif op is ANY:
if flags & SRE_FLAG_DOTALL:
emit(OPCODES[ANY_ALL])
else:
emit(OPCODES[ANY])
elif op in REPEATING_CODES:
if flags & SRE_FLAG_TEMPLATE:
raise error, "internal: unsupported template operator"
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif _simple(av) and op is not REPEAT:
if op is MAX_REPEAT:
emit(OPCODES[REPEAT_ONE])
else:
emit(OPCODES[MIN_REPEAT_ONE])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
else:
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
code[skip] = _len(code) - skip
if op is MAX_REPEAT:
emit(OPCODES[MAX_UNTIL])
else:
emit(OPCODES[MIN_UNTIL])
elif op is SUBPATTERN:
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2)
# _compile_info(code, av[1], flags)
_compile(code, av[1], flags)
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2+1)
elif op in SUCCESS_CODES:
emit(OPCODES[op])
elif op in ASSERT_CODES:
emit(OPCODES[op])
skip = _len(code); emit(0)
if av[0] >= 0:
emit(0) # look ahead
else:
lo, hi = av[1].getwidth()
if lo != hi:
raise error, "look-behind requires fixed-width pattern"
emit(lo) # look behind
_compile(code, av[1], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is CALL:
emit(OPCODES[op])
skip = _len(code); emit(0)
_compile(code, av, flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is AT:
emit(OPCODES[op])
if flags & SRE_FLAG_MULTILINE:
av = AT_MULTILINE.get(av, av)
if flags & SRE_FLAG_LOCALE:
av = AT_LOCALE.get(av, av)
elif flags & SRE_FLAG_UNICODE:
av = AT_UNICODE.get(av, av)
emit(ATCODES[av])
elif op is BRANCH:
emit(OPCODES[op])
tail = []
tailappend = tail.append
for av in av[1]:
skip = _len(code); emit(0)
# _compile_info(code, av, flags)
_compile(code, av, flags)
emit(OPCODES[JUMP])
tailappend(_len(code)); emit(0)
code[skip] = _len(code) - skip
emit(0) # end of branch
for tail in tail:
code[tail] = _len(code) - tail
elif op is CATEGORY:
emit(OPCODES[op])
if flags & SRE_FLAG_LOCALE:
av = CH_LOCALE[av]
elif flags & SRE_FLAG_UNICODE:
av = CH_UNICODE[av]
emit(CHCODES[av])
elif op is GROUPREF:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
else:
emit(OPCODES[op])
emit(av-1)
elif op is GROUPREF_EXISTS:
emit(OPCODES[op])
emit(av[0]-1)
skipyes = _len(code); emit(0)
_compile(code, av[1], flags)
if av[2]:
emit(OPCODES[JUMP])
skipno = _len(code); emit(0)
code[skipyes] = _len(code) - skipyes + 1
_compile(code, av[2], flags)
code[skipno] = _len(code) - skipno
else:
code[skipyes] = _len(code) - skipyes + 1
else:
raise ValueError, ("unsupported operand type", op)
def _compile_charset(charset, flags, code, fixup=None, fixes=None):
# compile charset subprogram
emit = code.append
for op, av in _optimize_charset(charset, fixup, fixes,
flags & SRE_FLAG_UNICODE):
emit(OPCODES[op])
if op is NEGATE:
pass
elif op is LITERAL:
emit(av)
elif op is RANGE:
emit(av[0])
emit(av[1])
elif op is CHARSET:
code.extend(av)
elif op is BIGCHARSET:
code.extend(av)
elif op is CATEGORY:
if flags & SRE_FLAG_LOCALE:
emit(CHCODES[CH_LOCALE[av]])
elif flags & SRE_FLAG_UNICODE:
emit(CHCODES[CH_UNICODE[av]])
else:
emit(CHCODES[av])
else:
raise error, "internal: unsupported set operator"
emit(OPCODES[FAILURE])
def _optimize_charset(charset, fixup, fixes, isunicode):
# internal: optimize character set
out = []
tail = []
charmap = bytearray(256)
for op, av in charset:
while True:
try:
if op is LITERAL:
if fixup:
i = fixup(av)
charmap[i] = 1
if fixes and i in fixes:
for k in fixes[i]:
charmap[k] = 1
else:
charmap[av] = 1
elif op is RANGE:
r = range(av[0], av[1]+1)
if fixup:
r = map(fixup, r)
if fixup and fixes:
for i in r:
charmap[i] = 1
if i in fixes:
for k in fixes[i]:
charmap[k] = 1
else:
for i in r:
charmap[i] = 1
elif op is NEGATE:
out.append((op, av))
else:
tail.append((op, av))
except IndexError:
if len(charmap) == 256:
# character set contains non-UCS1 character codes
charmap += b'\0' * 0xff00
continue
# character set contains non-BMP character codes
if fixup and isunicode and op is RANGE:
lo, hi = av
ranges = [av]
# There are only two ranges of cased astral characters:
# 10400-1044F (Deseret) and 118A0-118DF (Warang Citi).
_fixup_range(max(0x10000, lo), min(0x11fff, hi),
ranges, fixup)
for lo, hi in ranges:
if lo == hi:
tail.append((LITERAL, hi))
else:
tail.append((RANGE, (lo, hi)))
else:
tail.append((op, av))
break
# compress character map
runs = []
q = 0
while True:
p = charmap.find(b'\1', q)
if p < 0:
break
if len(runs) >= 2:
runs = None
break
q = charmap.find(b'\0', p)
if q < 0:
runs.append((p, len(charmap)))
break
runs.append((p, q))
if runs is not None:
# use literal/range
for p, q in runs:
if q - p == 1:
out.append((LITERAL, p))
else:
out.append((RANGE, (p, q - 1)))
out += tail
# if the case was changed or new representation is more compact
if fixup or len(out) < len(charset):
return out
# else original character set is good enough
return charset
# use bitmap
if len(charmap) == 256:
data = _mk_bitmap(charmap)
out.append((CHARSET, data))
out += tail
return out
# To represent a big charset, first a bitmap of all characters in the
# set is constructed. Then, this bitmap is sliced into chunks of 256
# characters, duplicate chunks are eliminated, and each chunk is
# given a number. In the compiled expression, the charset is
# represented by a 32-bit word sequence, consisting of one word for
# the number of different chunks, a sequence of 256 bytes (64 words)
# of chunk numbers indexed by their original chunk position, and a
# sequence of 256-bit chunks (8 words each).
# Compression is normally good: in a typical charset, large ranges of
# Unicode will be either completely excluded (e.g. if only cyrillic
# letters are to be matched), or completely included (e.g. if large
# subranges of Kanji match). These ranges will be represented by
# chunks of all one-bits or all zero-bits.
# Matching can be also done efficiently: the more significant byte of
# the Unicode character is an index into the chunk number, and the
# less significant byte is a bit index in the chunk (just like the
# CHARSET matching).
# In UCS-4 mode, the BIGCHARSET opcode still supports only subsets
# of the basic multilingual plane; an efficient representation
# for all of Unicode has not yet been developed.
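    # Illustrative lookup (not performed here): for a BMP code point c, the
    # chunk is mapping[c >> 8] and the bit tested inside that chunk is
    # c & 0xFF, e.g. U+0431 lands in chunk mapping[0x04] at bit index 0x31.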
charmap = bytes(charmap) # should be hashable
comps = {}
mapping = bytearray(256)
block = 0
data = bytearray()
for i in range(0, 65536, 256):
chunk = charmap[i: i + 256]
if chunk in comps:
mapping[i // 256] = comps[chunk]
else:
mapping[i // 256] = comps[chunk] = block
block += 1
data += chunk
data = _mk_bitmap(data)
data[0:0] = [block] + _bytes_to_codes(mapping)
out.append((BIGCHARSET, data))
out += tail
return out
def _fixup_range(lo, hi, ranges, fixup):
for i in map(fixup, range(lo, hi+1)):
for k, (lo, hi) in enumerate(ranges):
if i < lo:
                if i == lo - 1:
ranges[k] = (i, hi)
else:
ranges.insert(k, (i, i))
break
elif i > hi:
if i == hi + 1:
ranges[k] = (lo, i)
break
else:
break
else:
ranges.append((i, i))
_CODEBITS = _sre.CODESIZE * 8
_BITS_TRANS = b'0' + b'1' * 255
def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int):
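    # Turn the 0/1 byte map into a '0'/'1' string, reverse it, then slice it
    # into _CODEBITS-wide chunks and parse each chunk as a base-2 integer.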
s = bytes(bits).translate(_BITS_TRANS)[::-1]
return [_int(s[i - _CODEBITS: i], 2)
for i in range(len(s), 0, -_CODEBITS)]
def _bytes_to_codes(b):
# Convert block indices to word array
import array
if _sre.CODESIZE == 2:
code = 'H'
else:
code = 'I'
a = array.array(code, bytes(b))
assert a.itemsize == _sre.CODESIZE
assert len(a) * a.itemsize == len(b)
return a.tolist()
def _simple(av):
# check if av is a "simple" operator
lo, hi = av[2].getwidth()
return lo == hi == 1 and av[2][0][0] != SUBPATTERN
def _compile_info(code, pattern, flags):
# internal: compile an info block. in the current version,
# this contains min/max pattern width, and an optional literal
# prefix or a character map
lo, hi = pattern.getwidth()
if lo == 0:
return # not worth it
# look for a literal prefix
prefix = []
prefixappend = prefix.append
prefix_skip = 0
charset = [] # not used
charsetappend = charset.append
if not (flags & SRE_FLAG_IGNORECASE):
# look for literal prefix
for op, av in pattern.data:
if op is LITERAL:
if len(prefix) == prefix_skip:
prefix_skip = prefix_skip + 1
prefixappend(av)
elif op is SUBPATTERN and len(av[1]) == 1:
op, av = av[1][0]
if op is LITERAL:
prefixappend(av)
else:
break
else:
break
# if no prefix, look for charset prefix
if not prefix and pattern.data:
op, av = pattern.data[0]
if op is SUBPATTERN and av[1]:
op, av = av[1][0]
if op is LITERAL:
charsetappend((op, av))
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is IN:
charset = av
## if prefix:
## print "*** PREFIX", prefix, prefix_skip
## if charset:
## print "*** CHARSET", charset
# add an info block
emit = code.append
emit(OPCODES[INFO])
skip = len(code); emit(0)
# literal flag
mask = 0
if prefix:
mask = SRE_INFO_PREFIX
if len(prefix) == prefix_skip == len(pattern.data):
mask = mask + SRE_INFO_LITERAL
elif charset:
mask = mask + SRE_INFO_CHARSET
emit(mask)
# pattern length
if lo < MAXCODE:
emit(lo)
else:
emit(MAXCODE)
prefix = prefix[:MAXCODE]
if hi < MAXCODE:
emit(hi)
else:
emit(0)
# add literal prefix
if prefix:
emit(len(prefix)) # length
emit(prefix_skip) # skip
code.extend(prefix)
# generate overlap table
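        # (This is the classic KMP failure function over the prefix; e.g. the
        # prefix "abab" yields the overlap table [0, 0, 1, 2].)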
table = [-1] + ([0]*len(prefix))
for i in xrange(len(prefix)):
table[i+1] = table[i]+1
while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
table[i+1] = table[table[i+1]-1]+1
code.extend(table[1:]) # don't store first entry
elif charset:
_compile_charset(charset, flags, code)
code[skip] = len(code) - skip
try:
unicode
except NameError:
STRING_TYPES = (type(""),)
else:
STRING_TYPES = (type(""), type(unicode("")))
def isstring(obj):
for tp in STRING_TYPES:
if isinstance(obj, tp):
return 1
return 0
def _code(p, flags):
flags = p.pattern.flags | flags
code = []
# compile info block
_compile_info(code, p, flags)
# compile the pattern
_compile(code, p.data, flags)
code.append(OPCODES[SUCCESS])
return code
def compile(p, flags=0):
# internal: convert pattern list to internal format
if isstring(p):
pattern = p
p = sre_parse.parse(p, flags)
else:
pattern = None
code = _code(p, flags)
# print code
# XXX: <fl> get rid of this limitation!
if p.pattern.groups > 100:
raise AssertionError(
"sorry, but this version only supports 100 named groups"
)
# map in either direction
groupindex = p.pattern.groupdict
indexgroup = [None] * p.pattern.groups
for k, i in groupindex.items():
indexgroup[i] = k
return _sre.compile(
pattern, flags | p.pattern.flags, code,
p.pattern.groups-1,
groupindex, indexgroup
)
| svanschalkwyk/datafari | windows/python/Lib/sre_compile.py | Python | apache-2.0 | 19,817 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for emr_emr_usage_demo.py functions.
"""
import time
import pytest
import boto3
from boto3.s3.transfer import S3UploadFailedError
from botocore.exceptions import ClientError
import emr_usage_demo
@pytest.mark.parametrize('error_code,stop_on_method', [
(None, None),
('TestException', 'stub_create_bucket'),
('TestException', 'stub_put_object'),
])
def test_setup_bucket(
make_stubber, make_unique_name, stub_runner, error_code, stop_on_method):
s3_resource = boto3.resource('s3')
s3_stubber = make_stubber(s3_resource.meta.client)
bucket_name = make_unique_name('bucket-')
script_file_name = __file__
script_key = 'test-key'
with stub_runner(error_code, stop_on_method) as runner:
runner.add(
s3_stubber.stub_create_bucket, bucket_name,
s3_resource.meta.client.meta.region_name)
runner.add(s3_stubber.stub_head_bucket, bucket_name)
runner.add(s3_stubber.stub_put_object, bucket_name, script_key)
if error_code is None:
bucket = emr_usage_demo.setup_bucket(
bucket_name, script_file_name, script_key, s3_resource)
assert bucket.name == bucket_name
elif stop_on_method == 'stub_put_object':
with pytest.raises(S3UploadFailedError):
emr_usage_demo.setup_bucket(
bucket_name, script_file_name, script_key, s3_resource)
else:
with pytest.raises(ClientError) as exc_info:
emr_usage_demo.setup_bucket(
bucket_name, script_file_name, script_key, s3_resource)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,stop_on_method', [
(None, None),
('TestException', 'stub_list_objects'),
('TestException', 'stub_delete_objects'),
('TestException', 'stub_delete_bucket')
])
def test_delete_bucket(
make_stubber, make_unique_name, stub_runner, error_code, stop_on_method):
s3_resource = boto3.resource('s3')
s3_stubber = make_stubber(s3_resource.meta.client)
bucket = s3_resource.Bucket(make_unique_name('bucket-'))
obj_keys = ['test-key-1', 'test-key-2']
with stub_runner(error_code, stop_on_method) as runner:
runner.add(s3_stubber.stub_list_objects, bucket.name, object_keys=obj_keys)
runner.add(s3_stubber.stub_delete_objects, bucket.name, obj_keys)
runner.add(s3_stubber.stub_delete_bucket, bucket.name)
if error_code is None:
emr_usage_demo.delete_bucket(bucket)
else:
with pytest.raises(ClientError) as exc_info:
emr_usage_demo.delete_bucket(bucket)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize(
'error_code_jf,stop_on_method_jf,error_code_svc,stop_on_method_svc', [
(None, None, None, None),
('TestException', 'stub_create_role', None, None),
('TestException', 'stub_attach_role_policy', None, None),
('TestException', 'stub_create_instance_profile', None, None),
('TestException', 'stub_add_role_to_instance_profile', None, None),
(None, None, 'TestException', 'stub_create_role'),
(None, None, 'TestException', 'stub_attach_role_policy'),
])
def test_create_roles(
make_stubber, make_unique_name, stub_runner, error_code_jf, stop_on_method_jf,
error_code_svc, stop_on_method_svc):
iam_resource = boto3.resource('iam')
iam_stubber = make_stubber(iam_resource.meta.client)
job_flow_role_name = make_unique_name('jfrole-')
service_role_name = make_unique_name('srole-')
with stub_runner(error_code_jf, stop_on_method_jf) as runner_jf:
runner_jf.add(iam_stubber.stub_create_role, job_flow_role_name)
runner_jf.add(iam_stubber.stub_get_role, job_flow_role_name)
runner_jf.add(
iam_stubber.stub_attach_role_policy, job_flow_role_name,
"arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role")
runner_jf.add(iam_stubber.stub_create_instance_profile, job_flow_role_name)
runner_jf.add(
iam_stubber.stub_add_role_to_instance_profile, job_flow_role_name,
job_flow_role_name)
if error_code_jf is None:
with stub_runner(error_code_svc, stop_on_method_svc) as runner_svc:
runner_svc.add(iam_stubber.stub_create_role, service_role_name)
runner_svc.add(iam_stubber.stub_get_role, service_role_name)
runner_svc.add(
iam_stubber.stub_attach_role_policy, service_role_name,
'arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole')
if error_code_jf is None and error_code_svc is None:
job_flow_role, service_role = emr_usage_demo.create_roles(
job_flow_role_name, service_role_name, iam_resource)
assert job_flow_role.name == job_flow_role_name
assert service_role.name == service_role_name
else:
with pytest.raises(ClientError) as exc_info:
emr_usage_demo.create_roles(
job_flow_role_name, service_role_name, iam_resource)
assert (exc_info.value.response['Error']['Code'] == error_code_jf or
exc_info.value.response['Error']['Code'] == error_code_svc)
@pytest.mark.parametrize('error_code,stop_on_method', [
(None, None),
('TestException', 'stub_list_attached_role_policies'),
('TestException', 'stub_detach_role_policy'),
('TestException', 'stub_list_instance_profiles_for_role'),
('TestException', 'stub_remove_role_from_instance_profile'),
('TestException', 'stub_delete_instance_profile'),
('TestException', 'stub_delete_role')
])
def test_delete_roles(
make_stubber, make_unique_name, stub_runner, error_code, stop_on_method):
iam_resource = boto3.resource('iam')
iam_stubber = make_stubber(iam_resource.meta.client)
roles = [iam_resource.Role(make_unique_name('role-')) for _ in range(2)]
policy_arn = 'arn:aws:iam:::policy/test-policy'
policy = {'test-policy': policy_arn}
inst_profile = 'test-profile'
with stub_runner(error_code, stop_on_method) as runner:
for role in roles:
runner.add(
iam_stubber.stub_list_attached_role_policies, role.name,
policy)
runner.add(iam_stubber.stub_detach_role_policy, role.name, policy_arn)
runner.add(
iam_stubber.stub_list_instance_profiles_for_role, role.name,
[inst_profile])
runner.add(
iam_stubber.stub_remove_role_from_instance_profile, inst_profile,
role.name)
runner.add(iam_stubber.stub_delete_instance_profile, inst_profile)
runner.add(iam_stubber.stub_delete_role, role.name)
if error_code is None:
emr_usage_demo.delete_roles(roles)
else:
with pytest.raises(ClientError) as exc_info:
emr_usage_demo.delete_roles(roles)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,stop_on_method', [
(None, None),
('TestException', 'stub_describe_vpcs'),
('TestException', 'stub_create_security_group'),
])
def test_create_security_groups(
make_stubber, make_unique_name, stub_runner, error_code, stop_on_method):
ec2_resource = boto3.resource('ec2')
ec2_stubber = make_stubber(ec2_resource.meta.client)
vpc_id = 'test-vpc'
sec_groups = {kind: f'sg-{kind}' for kind in ['manager', 'worker']}
with stub_runner(error_code, stop_on_method) as runner:
runner.add(
ec2_stubber.stub_describe_vpcs, {vpc_id: True},
[{'Name': 'isDefault', 'Values': ['true']}])
runner.add(
ec2_stubber.stub_create_security_group, 'test-manager',
'EMR manager group.', vpc_id, sec_groups['manager'])
runner.add(
ec2_stubber.stub_create_security_group, 'test-worker',
'EMR worker group.', vpc_id, sec_groups['worker'])
if error_code is None:
got_groups = emr_usage_demo.create_security_groups('test', ec2_resource)
assert [group.id for group in got_groups.values()] == list(sec_groups.values())
else:
with pytest.raises(ClientError) as exc_info:
emr_usage_demo.create_security_groups('test', ec2_resource)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,stop_on_method', [
(None, None),
('TestException', 'stub_describe_security_groups'),
('TestException', 'stub_revoke_security_group_ingress'),
('TestException', 'stub_delete_security_group'),
])
def test_delete_security_groups(
make_stubber, stub_runner, error_code, stop_on_method):
ec2_resource = boto3.resource('ec2')
ec2_stubber = make_stubber(ec2_resource.meta.client)
sec_group_info = {
kind: {
'sg': ec2_resource.SecurityGroup(f'sg-{kind}'),
'id': f'sg-{kind}',
'ip_permissions': [],
'group_name': f'test-{kind}'}
for kind in ['manager', 'worker']}
with stub_runner(error_code, stop_on_method) as runner:
for sg in sec_group_info.values():
runner.add(ec2_stubber.stub_describe_security_groups, [sg])
runner.add(ec2_stubber.stub_revoke_security_group_ingress, sg)
for sg in sec_group_info.values():
runner.add(ec2_stubber.stub_delete_security_group, sg['id'])
if error_code is None:
emr_usage_demo.delete_security_groups(
{key: value['sg'] for key, value in sec_group_info.items()})
else:
with pytest.raises(ClientError) as exc_info:
emr_usage_demo.delete_security_groups(
{key: value['sg'] for key, value in sec_group_info.items()})
assert exc_info.value.response['Error']['Code'] == error_code
def test_delete_security_groups_dependency_violation(make_stubber, monkeypatch):
ec2_resource = boto3.resource('ec2')
ec2_stubber = make_stubber(ec2_resource.meta.client)
sec_group_info = {
'sg': ec2_resource.SecurityGroup(f'sg-test'),
'id': f'sg-test',
'ip_permissions': [],
'group_name': f'test-test'}
monkeypatch.setattr(time, 'sleep', lambda x: None)
ec2_stubber.stub_describe_security_groups([sec_group_info])
ec2_stubber.stub_revoke_security_group_ingress(sec_group_info)
ec2_stubber.stub_delete_security_group(
sec_group_info['id'], error_code='DependencyViolation')
ec2_stubber.stub_delete_security_group(sec_group_info['id'])
emr_usage_demo.delete_security_groups({'sg': sec_group_info['sg']})
| awsdocs/aws-doc-sdk-examples | python/example_code/emr/test/test_emr_usage_demo.py | Python | apache-2.0 | 10,773 |
def append_attr(obj, attr, value):
"""
Appends value to object attribute
Attribute may be undefined
For example:
append_attr(obj, 'test', 1)
append_attr(obj, 'test', 2)
assert obj.test == [1, 2]
"""
try:
getattr(obj, attr).append(value)
except AttributeError:
setattr(obj, attr, [value])
| baverman/snaked | snaked/signals/util.py | Python | mit | 375 |
from wtforms import StringField, PasswordField, SubmitField, TextAreaField
from flask_wtf import Form
from wtforms.validators import Length, DataRequired, EqualTo, Email
class RegistrationForm(Form):
username = StringField('Username', [Length(min=4, max=25), DataRequired("Enter username")])
email = StringField('Email Address', [Email("Enter email"), Length(min=6, max=35)])
password = PasswordField('New Password', [DataRequired("Enter password"),
EqualTo('confirm', message='Passwords must match')])
confirm = PasswordField('Repeat Password')
submit = SubmitField('Register')
class LoginForm(Form):
username = StringField('Username', [Length(min=4, max=25), DataRequired("Enter username")])
password = PasswordField('Enter Password', [DataRequired("Enter your password")])
submit = SubmitField('Log In')
class SuggestionForm(Form):
title = StringField('Title', [Length(min=3, max=50), DataRequired("Enter a title")])
description = TextAreaField(
'Description', [Length(min=6, max=140), DataRequired("Description required")])
submit = SubmitField('Suggest')
class CommentForm(Form):
comment = TextAreaField('Comment', [Length(min=4, max=200), DataRequired("Comment required")])
| bonny-mwenda/bc-8-suggestion-box | forms.py | Python | gpl-3.0 | 1,274 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RTclust(RPackage):
"""Robust Trimmed Clustering
Provides functions for robust trimmed clustering. The methods are described
in Garcia-Escudero (2008) <doi:10.1214/07-AOS515>, Fritz et al. (2012)
<doi:10.18637/jss.v047.i12>, Garcia-Escudero et al. (2011)
<doi:10.1007/s11222-010-9194-z> and others."""
homepage = "https://cloud.r-project.org/package=tclust"
url = "https://cloud.r-project.org/src/contrib/tclust_1.3-1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/tclust"
version('1.4-2', sha256='95dcd07dbd16383f07f5cea8561e7f3bf314e4a7483879841103b149fc8c65d9')
version('1.4-1', sha256='4b0be612c8ecd7b4eb19a44ab6ac8f5d40515600ae1144c55989b6b41335ad9e')
version('1.3-1', sha256='fe4479a73b947d8f6c1cc63587283a8b6223d430d39eee4e5833a06d3d1726d2')
version('1.2-7', sha256='7d2cfa35bbd44086af45be842e6c4743380c7cc8a0f985d2bb7c1a0690c878d7')
version('1.2-3', sha256='d749d4e4107b876a22ca2c0299e30e2c77cb04f53f7e5658348e274aae3f2b28')
version('1.1-03', sha256='b8a62a1d27e69ac7e985ba5ea2ae5d182d2e51665bfbfb178e22b63041709270')
version('1.1-02', sha256='f73c0d7a495552f901b710cf34e114c0ba401d5a17c48156313245904bcccad4')
depends_on('r@2.12.0:', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/r-tclust/package.py | Python | lgpl-2.1 | 1,488 |
#!/usr/bin/env python3
'''
Kurgan MultiAgent Framework
http://www.kurgan.com.br/
Agent to check for backup and interesting files.
Author: Glaudson Ocampos - <glaudson@kurgan.com.br>
Created on August 9th, 2016.
Last modified on January 13th, 2017.
'''
import sys, os
import random
import string
from multiprocessing import Process
import stomp
import signal, time
from daemonize import Daemonize
from os.path import basename
current_dir = os.path.basename(os.getcwd())
if current_dir == "agents":
sys.path.append('../')
if current_dir == "Kurgan-Framework":
sys.path.append('./')
#from libs.STOMP import STOMP_Connector
from libs.FIPA import FIPAMessage
import config as cf
AGENT_NAME="AgentBackup"
AGENT_ID="20"
conn = ''
class AgentBackup:
url = ''
port = ''
conn = stomp.Connection()
def readConfig(self, conf):
print("Getting configurations..")
def setURL(self, val):
self.url = val
def getURL(self):
return self.url
def setPort(self, val):
        self.port = val
def getPort(self):
return self.port
def setConn(self, val):
        self.conn = val
def getConn(self):
return self.conn
class MyListener(stomp.ConnectionListener):
running = 0
    def set_running(self, val):
self.running = val
def get_running(self):
return self.running
def on_error(self, headers, message):
print('received an error "%s"' % message)
def on_message(self, headers, message):
#gen_fpm(message)
if message == "AgentBackup":
if self.running == 0:
print("Running Attack to search backup and interesting files...")
# conn.send(body='OK.. estou no ar!', destination='/queue/kurgan')
msg = ("(tell\n"
"\t:sender AgentBackup\n"
"\t:receiver MainBoard\n"
"\t:in-reply-to msg777\n"
"\t:ontology KurganDict\n"
"\t:language Python\n"
"\t:content \"crawling(URL, Trying get backup and interesting files)\"\n"
")\n")
send_message(msg)
self.running = 1
p = Process(target=crawling, args=('alvo',))
p.start()
else:
msg="Agent in execution! Please Wait."
send_message(msg)
#conn.send(body='Agent in execution! Please Wait.', destination=cf.STOMP_DESTINATION)
print("Ja executando o ataque de crawling....")
else:
print('%s' % message)
def send_message(msg):
conn.send(body=msg, destination=cf.STOMP_TOPIC)
def receive_data_from_agents():
ret = conn.receive_data()
return ret
def handler(signum, frame):
print("Stop execution...", signum);
sys.exit(0)
def crawling(target):
print("Crawling ataque.....");
time.sleep(200000)
def runAgent():
signal.signal(signal.SIGINT, handler)
global conn
fpm = FIPAMessage()
fpm.set_performative("subscribe")
fpm.set_sender(AGENT_NAME)
fpm.set_receiver("All Agents")
content = ("Register Agent (= (agent-name) (" + AGENT_NAME + "))\n")
fpm.set_content(content)
fpm.set_reply_with(''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(5)))
fpm.close_message()
#print(fpm.get_message())
conn = stomp.Connection()
conn.set_listener('', MyListener())
conn.start()
conn.connect(cf.STOMP_USERNAME, cf.STOMP_PASSWORD, wait=True)
conn.subscribe(destination=cf.STOMP_TOPIC, id=AGENT_ID, ack='auto')
conn.send(body=''.join(fpm.get_message()), destination=cf.STOMP_TOPIC)
while True:
time.sleep(2)
#rcv = receive_data_from_agents()
#if not rcv:
# print(rcv)
conn.disconnect()
def show_help():
print("Kurgan MultiAgent Framework version ", cf.VERSION)
print("Usage: python3 agentBackup.py <background|foreground>")
print("\nExample:\n")
print("python3 agentBackup.py background")
exit(0)
def run(background=False):
if background == True:
pid = os.fork()
if pid:
p = basename(sys.argv[0])
myname, file_extension = os.path.splitext(p)
pidfile = '/tmp/%s.pid' % myname
daemon = Daemonize(app=myname, pid=pidfile, action=runAgent)
daemon.start()
print("Agent Loaded.")
else:
runAgent()
def main(args):
if args[0] == "foreground":
run(background=False)
else:
if args[0] == "background":
run(background=True)
else:
show_help()
exit
exit
if __name__ == '__main__':
main(sys.argv[1:]) | glaudsonml/kurgan-ai | agents/agentBackup.py | Python | apache-2.0 | 5,090 |
from benchfw import BenchEnv, BenchmarkIt
import sys
if __name__ == '__main__':
    if len(sys.argv) != 2:
print("Usage {} <dbname>".format(sys.argv[0]))
sys.exit(1)
db_name = sys.argv[1]
benv = BenchEnv(db_name=db_name)
bench = BenchmarkIt.load(env=benv, name="Load CSV")
df = bench.to_df(case="Nop")
print("NOP")
print(df)
df = bench.to_df(case="Pandas",corrected=True, raw_header=True)
print("PANDAS")
print(df)
df = bench.to_df(case="Progressivis", corrected=False, raw_header=True)
print("PROGRESSIVIS")
print(df)
bench.plot( x=range(1,4), corrected=True)
| jdfekete/progressivis | benchmarks/new_dump.py | Python | bsd-2-clause | 635 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Corpus in the Matrix Market format.
"""
import logging
from gensim import interfaces, matutils
from gensim.corpora import IndexedCorpus
logger = logging.getLogger('gensim.corpora.mmcorpus')
class MmCorpus(matutils.MmReader, IndexedCorpus):
"""
Corpus in the Matrix Market format.
"""
def __init__(self, fname):
# avoid calling super(), too confusing
IndexedCorpus.__init__(self, fname)
matutils.MmReader.__init__(self, fname)
def __iter__(self):
"""
Interpret a matrix in Matrix Market format as a streamed gensim corpus
(yielding one document at a time).
"""
for doc_id, doc in super(MmCorpus, self).__iter__():
yield doc # get rid of doc id, return the sparse vector only
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=1000, metadata=False):
"""
Save a corpus in the Matrix Market format to disk.
This function is automatically called by `MmCorpus.serialize`; don't
call it directly, call `serialize` instead.
"""
logger.info("storing corpus in Matrix Market format to %s" % fname)
num_terms = len(id2word) if id2word is not None else None
return matutils.MmWriter.write_corpus(fname, corpus, num_terms=num_terms, index=True, progress_cnt=progress_cnt, metadata=metadata)
# endclass MmCorpus
| krishna11888/ai | third_party/gensim/gensim/corpora/mmcorpus.py | Python | gpl-2.0 | 1,588 |
# AsteriskLint -- an Asterisk PBX config syntax checker
# Copyright (C) 2015-2019 Walter Doekes, OSSO B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from .cls import Singleton
class AsteriskVersion(metaclass=Singleton):
"""
Store the used Asterisk version globally. If you don't initialize
this before anyone requests anything, you get the default.
Example::
from asterisklint.version import AsteriskVersion
AsteriskVersion('v13') # set version 13 throughout the run
"""
DEFAULT = 'v13'
def __init__(self, version=None):
self.version = version or self.DEFAULT
def reinit(self, version=None):
if version and self.version != version:
raise RuntimeError(
'Attempt to re-set Asterisk version from {} to {}'.format(
self.version, version))
def list_app_mods(self):
"""
        Return a list of app module names in absolute import format. Takes
        the internal version into account.
"""
return self._list_mods('app')
def list_func_mods(self):
"""
        Return a list of function module names in absolute import format.
        Takes the internal version into account.
"""
return self._list_mods('func')
def _get_path(self, submod):
return os.path.join(os.path.dirname(__file__), submod, self.version)
def _list_mods(self, submod):
appsdir = self._get_path(submod)
appmods = [i[0:-3] for i in os.listdir(appsdir) if i.endswith('.py')]
modfmt = 'asterisklint.{}.{}.{{}}'.format(submod, self.version)
return [modfmt.format(appmod) for appmod in appmods]
| ossobv/asterisklint | asterisklint/version.py | Python | gpl-3.0 | 2,259 |
from os import environ
properties = {}
properties['GITHUB_USERNAME'] = environ['GITHUB_USERNAME']
properties['GITHUB_PASSWORD'] = environ['GITHUB_PASSWORD']
properties['GITHUB_REPO'] = environ['GITHUB_REPO']
properties['REDIS_PORT'] = environ['REDIS_PORT']
properties['REDIS_HOST'] = environ['REDIS_HOST']
| nivertech/gordon | web/config.py | Python | apache-2.0 | 308 |
# Copyright 2013 Netease Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for availability zones
"""
from oslo_config import cfg
from nova import availability_zones as az
from nova import context
from nova import db
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
CONF.import_opt('internal_service_availability_zone',
'nova.availability_zones')
CONF.import_opt('default_availability_zone',
'nova.availability_zones')
class AvailabilityZoneTestCases(test.TestCase):
"""Test case for aggregate based availability zone."""
def setUp(self):
super(AvailabilityZoneTestCases, self).setUp()
self.host = 'me'
self.availability_zone = 'nova-test'
self.default_az = CONF.default_availability_zone
self.default_in_az = CONF.internal_service_availability_zone
self.context = context.get_admin_context()
self.agg = self._create_az('az_agg', self.availability_zone)
def tearDown(self):
db.aggregate_delete(self.context, self.agg['id'])
super(AvailabilityZoneTestCases, self).tearDown()
def _create_az(self, agg_name, az_name):
agg_meta = {'name': agg_name}
agg = db.aggregate_create(self.context, agg_meta)
metadata = {'availability_zone': az_name}
db.aggregate_metadata_add(self.context, agg['id'], metadata)
return agg
def _update_az(self, aggregate, az_name):
metadata = {'availability_zone': az_name}
db.aggregate_update(self.context, aggregate['id'], metadata)
def _create_service_with_topic(self, topic, host, disabled=False):
values = {
'binary': 'bin',
'host': host,
'topic': topic,
'disabled': disabled,
}
return db.service_create(self.context, values)
def _destroy_service(self, service):
return db.service_destroy(self.context, service['id'])
def _add_to_aggregate(self, service, aggregate):
return db.aggregate_host_add(self.context,
aggregate['id'], service['host'])
def _delete_from_aggregate(self, service, aggregate):
return db.aggregate_host_delete(self.context,
aggregate['id'], service['host'])
def test_rest_availability_zone_reset_cache(self):
az._get_cache().add('cache', 'fake_value')
az.reset_cache()
self.assertIsNone(az._get_cache().get('cache'))
def test_update_host_availability_zone_cache(self):
"""Test availability zone cache could be update."""
service = self._create_service_with_topic('compute', self.host)
# Create a new aggregate with an AZ and add the host to the AZ
az_name = 'az1'
cache_key = az._make_cache_key(self.host)
agg_az1 = self._create_az('agg-az1', az_name)
self._add_to_aggregate(service, agg_az1)
az.update_host_availability_zone_cache(self.context, self.host)
self.assertEqual(az._get_cache().get(cache_key), 'az1')
az.update_host_availability_zone_cache(self.context, self.host, 'az2')
self.assertEqual(az._get_cache().get(cache_key), 'az2')
def test_set_availability_zone_compute_service(self):
"""Test for compute service get right availability zone."""
service = self._create_service_with_topic('compute', self.host)
services = db.service_get_all(self.context)
        # The service has not been added to an aggregate, so confirm it gets
        # the default availability zone.
new_service = az.set_availability_zones(self.context, services)[0]
self.assertEqual(new_service['availability_zone'],
self.default_az)
        # The service is added to the aggregate, so confirm the aggregate's
        # availability zone is returned.
self._add_to_aggregate(service, self.agg)
new_service = az.set_availability_zones(self.context, services)[0]
self.assertEqual(new_service['availability_zone'],
self.availability_zone)
self._destroy_service(service)
def test_set_availability_zone_unicode_key(self):
"""Test set availability zone cache key is unicode."""
service = self._create_service_with_topic('network', self.host)
services = db.service_get_all(self.context)
az.set_availability_zones(self.context, services)
self.assertIsInstance(services[0]['host'], unicode)
cached_key = az._make_cache_key(services[0]['host'])
self.assertIsInstance(cached_key, str)
self._destroy_service(service)
def test_set_availability_zone_not_compute_service(self):
"""Test not compute service get right availability zone."""
service = self._create_service_with_topic('network', self.host)
services = db.service_get_all(self.context)
new_service = az.set_availability_zones(self.context, services)[0]
self.assertEqual(new_service['availability_zone'],
self.default_in_az)
self._destroy_service(service)
def test_get_host_availability_zone(self):
"""Test get right availability zone by given host."""
self.assertEqual(self.default_az,
az.get_host_availability_zone(self.context, self.host))
service = self._create_service_with_topic('compute', self.host)
self._add_to_aggregate(service, self.agg)
self.assertEqual(self.availability_zone,
az.get_host_availability_zone(self.context, self.host))
def test_update_host_availability_zone(self):
"""Test availability zone could be update by given host."""
service = self._create_service_with_topic('compute', self.host)
# Create a new aggregate with an AZ and add the host to the AZ
az_name = 'az1'
agg_az1 = self._create_az('agg-az1', az_name)
self._add_to_aggregate(service, agg_az1)
self.assertEqual(az_name,
az.get_host_availability_zone(self.context, self.host))
# Update AZ
new_az_name = 'az2'
self._update_az(agg_az1, new_az_name)
self.assertEqual(new_az_name,
az.get_host_availability_zone(self.context, self.host))
def test_delete_host_availability_zone(self):
"""Test availability zone could be deleted successfully."""
service = self._create_service_with_topic('compute', self.host)
# Create a new aggregate with an AZ and add the host to the AZ
az_name = 'az1'
agg_az1 = self._create_az('agg-az1', az_name)
self._add_to_aggregate(service, agg_az1)
self.assertEqual(az_name,
az.get_host_availability_zone(self.context, self.host))
# Delete the AZ via deleting the aggregate
self._delete_from_aggregate(service, agg_az1)
self.assertEqual(self.default_az,
az.get_host_availability_zone(self.context, self.host))
def test_get_availability_zones(self):
"""Test get_availability_zones."""
        # When the get_only_available param of get_availability_zones is left
        # at its default (False), it returns two lists: zones with at least
        # one enabled service, and zones with no enabled services. When
        # get_only_available is set to True, it returns only the list of
        # zones with at least one enabled service.
# Use the following test data:
#
# zone host enabled
# nova-test host1 Yes
# nova-test host2 No
# nova-test2 host3 Yes
# nova-test3 host4 No
# <default> host5 No
agg2 = self._create_az('agg-az2', 'nova-test2')
agg3 = self._create_az('agg-az3', 'nova-test3')
service1 = self._create_service_with_topic('compute', 'host1',
disabled=False)
service2 = self._create_service_with_topic('compute', 'host2',
disabled=True)
service3 = self._create_service_with_topic('compute', 'host3',
disabled=False)
service4 = self._create_service_with_topic('compute', 'host4',
disabled=True)
self._create_service_with_topic('compute', 'host5',
disabled=True)
self._add_to_aggregate(service1, self.agg)
self._add_to_aggregate(service2, self.agg)
self._add_to_aggregate(service3, agg2)
self._add_to_aggregate(service4, agg3)
zones, not_zones = az.get_availability_zones(self.context)
self.assertEqual(zones, ['nova-test', 'nova-test2'])
self.assertEqual(not_zones, ['nova-test3', 'nova'])
zones = az.get_availability_zones(self.context, True)
self.assertEqual(zones, ['nova-test', 'nova-test2'])
zones, not_zones = az.get_availability_zones(self.context,
with_hosts=True)
self.assertJsonEqual(zones,
[(u'nova-test2', set([u'host3'])),
(u'nova-test', set([u'host1']))])
self.assertJsonEqual(not_zones,
[(u'nova-test3', set([u'host4'])),
(u'nova', set([u'host5']))])
def test_get_instance_availability_zone_default_value(self):
"""Test get right availability zone by given an instance."""
fake_inst_id = 162
fake_inst = fakes.stub_instance(fake_inst_id, host=self.host)
self.assertEqual(self.default_az,
az.get_instance_availability_zone(self.context, fake_inst))
def test_get_instance_availability_zone_from_aggregate(self):
"""Test get availability zone from aggregate by given an instance."""
host = 'host170'
service = self._create_service_with_topic('compute', host)
self._add_to_aggregate(service, self.agg)
fake_inst_id = 174
fake_inst = fakes.stub_instance(fake_inst_id, host=host)
self.assertEqual(self.availability_zone,
az.get_instance_availability_zone(self.context, fake_inst))
| petrutlucian94/nova | nova/tests/unit/test_availability_zones.py | Python | apache-2.0 | 10,927 |
# coding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..utils import (
clean_html,
dict_get,
ExtractorError,
float_or_none,
get_element_by_class,
int_or_none,
js_to_json,
parse_duration,
parse_iso8601,
try_get,
unescapeHTML,
urlencode_postdata,
urljoin,
)
from ..compat import (
compat_HTTPError,
compat_urlparse,
)
class BBCCoUkIE(InfoExtractor):
IE_NAME = 'bbc.co.uk'
IE_DESC = 'BBC iPlayer'
_ID_REGEX = r'(?:[pbm][\da-z]{7}|w[\da-z]{7,14})'
_VALID_URL = r'''(?x)
https?://
(?:www\.)?bbc\.co\.uk/
(?:
programmes/(?!articles/)|
iplayer(?:/[^/]+)?/(?:episode/|playlist/)|
music/(?:clips|audiovideo/popular)[/#]|
radio/player/|
events/[^/]+/play/[^/]+/
)
(?P<id>%s)(?!/(?:episodes|broadcasts|clips))
''' % _ID_REGEX
_LOGIN_URL = 'https://account.bbc.com/signin'
_NETRC_MACHINE = 'bbc'
_MEDIASELECTOR_URLS = [
# Provides HQ HLS streams with even better quality that pc mediaset but fails
# with geolocation in some cases when it's even not geo restricted at all (e.g.
# http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable.
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
]
_MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection'
_EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist'
_NAMESPACES = (
_MEDIASELECTION_NS,
_EMP_PLAYLIST_NS,
)
_TESTS = [
{
'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
'info_dict': {
'id': 'b039d07m',
'ext': 'flv',
'title': 'Leonard Cohen, Kaleidoscope - BBC Radio 4',
'description': 'The Canadian poet and songwriter reflects on his musical career.',
},
'params': {
# rtmp download
'skip_download': True,
}
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Man in Black: Series 3: The Printed Name',
'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
'duration': 1800,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Episode is no longer available on BBC iPlayer Radio',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Voice UK: Series 3: Blind Auditions 5',
'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.',
'duration': 5100,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
'info_dict': {
'id': 'b03k3pb7',
'ext': 'flv',
'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
'description': '2. Invasion',
'duration': 3600,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
}, {
'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
'info_dict': {
'id': 'b04v209v',
'ext': 'flv',
'title': 'Pete Tong, The Essential New Tune Special',
'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
'duration': 10800,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Episode is no longer available on BBC iPlayer Radio',
}, {
'url': 'http://www.bbc.co.uk/music/clips/p022h44b',
'note': 'Audio',
'info_dict': {
'id': 'p022h44j',
'ext': 'flv',
'title': 'BBC Proms Music Guides, Rachmaninov: Symphonic Dances',
'description': "In this Proms Music Guide, Andrew McGregor looks at Rachmaninov's Symphonic Dances.",
'duration': 227,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
'note': 'Video',
'info_dict': {
'id': 'p025c103',
'ext': 'flv',
'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
'duration': 226,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
'info_dict': {
'id': 'p02n76xf',
'ext': 'flv',
'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
'duration': 3540,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'geolocation',
}, {
'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
'info_dict': {
'id': 'b05zmgw1',
'ext': 'flv',
'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
'title': 'Royal Academy Summer Exhibition',
'duration': 3540,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'geolocation',
}, {
# iptv-all mediaset fails with geolocation however there is no geo restriction
# for this programme at all
'url': 'http://www.bbc.co.uk/programmes/b06rkn85',
'info_dict': {
'id': 'b06rkms3',
'ext': 'flv',
'title': "Best of the Mini-Mixes 2015: Part 3, Annie Mac's Friday Night - BBC Radio 1",
'description': "Annie has part three in the Best of the Mini-Mixes 2015, plus the year's Most Played!",
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Now it\'s really geo-restricted',
}, {
# compact player (https://github.com/rg3/youtube-dl/issues/8147)
'url': 'http://www.bbc.co.uk/programmes/p028bfkf/player',
'info_dict': {
'id': 'p028bfkj',
'ext': 'flv',
'title': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews',
'description': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews',
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/radio/player/p03cchwf',
'only_matching': True,
}, {
'url': 'https://www.bbc.co.uk/music/audiovideo/popular#p055bc55',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/programmes/w3csv1y9',
'only_matching': True,
}, {
'url': 'https://www.bbc.co.uk/programmes/m00005xn',
'only_matching': True,
}, {
'url': 'https://www.bbc.co.uk/programmes/w172w4dww1jqt5s',
'only_matching': True,
}]
_USP_RE = r'/([^/]+?)\.ism(?:\.hlsv2\.ism)?/[^/]+\.m3u8'
def _login(self):
username, password = self._get_login_info()
if username is None:
return
login_page = self._download_webpage(
self._LOGIN_URL, None, 'Downloading signin page')
login_form = self._hidden_inputs(login_page)
login_form.update({
'username': username,
'password': password,
})
post_url = urljoin(self._LOGIN_URL, self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
'post url', default=self._LOGIN_URL, group='url'))
response, urlh = self._download_webpage_handle(
post_url, None, 'Logging in', data=urlencode_postdata(login_form),
headers={'Referer': self._LOGIN_URL})
if self._LOGIN_URL in urlh.geturl():
error = clean_html(get_element_by_class('form-message', response))
if error:
raise ExtractorError(
'Unable to login: %s' % error, expected=True)
raise ExtractorError('Unable to log in')
def _real_initialize(self):
self._login()
class MediaSelectionError(Exception):
def __init__(self, id):
self.id = id
def _extract_asx_playlist(self, connection, programme_id):
asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
return [ref.get('href') for ref in asx.findall('./Entry/ref')]
def _extract_items(self, playlist):
return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS)
def _findall_ns(self, element, xpath):
elements = []
for ns in self._NAMESPACES:
elements.extend(element.findall(xpath % ns))
return elements
def _extract_medias(self, media_selection):
error = media_selection.find('./{%s}error' % self._MEDIASELECTION_NS)
if error is None:
media_selection.find('./{%s}error' % self._EMP_PLAYLIST_NS)
if error is not None:
raise BBCCoUkIE.MediaSelectionError(error.get('id'))
return self._findall_ns(media_selection, './{%s}media')
def _extract_connections(self, media):
return self._findall_ns(media, './{%s}connection')
def _get_subtitles(self, media, programme_id):
subtitles = {}
for connection in self._extract_connections(media):
captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
subtitles[lang] = [
{
'url': connection.get('href'),
'ext': 'ttml',
},
]
return subtitles
def _raise_extractor_error(self, media_selection_error):
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, media_selection_error.id),
expected=True)
def _download_media_selector(self, programme_id):
last_exception = None
for mediaselector_url in self._MEDIASELECTOR_URLS:
try:
return self._download_media_selector_url(
mediaselector_url % programme_id, programme_id)
except BBCCoUkIE.MediaSelectionError as e:
if e.id in ('notukerror', 'geolocation', 'selectionunavailable'):
last_exception = e
continue
self._raise_extractor_error(e)
self._raise_extractor_error(last_exception)
def _download_media_selector_url(self, url, programme_id=None):
media_selection = self._download_xml(
url, programme_id, 'Downloading media selection XML',
expected_status=(403, 404))
return self._process_media_selector(media_selection, programme_id)
def _process_media_selector(self, media_selection, programme_id):
formats = []
subtitles = None
urls = []
for media in self._extract_medias(media_selection):
kind = media.get('kind')
if kind in ('video', 'audio'):
bitrate = int_or_none(media.get('bitrate'))
encoding = media.get('encoding')
service = media.get('service')
width = int_or_none(media.get('width'))
height = int_or_none(media.get('height'))
file_size = int_or_none(media.get('media_file_size'))
for connection in self._extract_connections(media):
href = connection.get('href')
if href in urls:
continue
if href:
urls.append(href)
conn_kind = connection.get('kind')
protocol = connection.get('protocol')
supplier = connection.get('supplier')
transfer_format = connection.get('transferFormat')
format_id = supplier or conn_kind or protocol
if service:
format_id = '%s_%s' % (service, format_id)
# ASX playlist
if supplier == 'asx':
for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
formats.append({
'url': ref,
'format_id': 'ref%s_%s' % (i, format_id),
})
elif transfer_format == 'dash':
formats.extend(self._extract_mpd_formats(
href, programme_id, mpd_id=format_id, fatal=False))
elif transfer_format == 'hls':
formats.extend(self._extract_m3u8_formats(
href, programme_id, ext='mp4', entry_protocol='m3u8_native',
m3u8_id=format_id, fatal=False))
if re.search(self._USP_RE, href):
usp_formats = self._extract_m3u8_formats(
re.sub(self._USP_RE, r'/\1.ism/\1.m3u8', href),
programme_id, ext='mp4', entry_protocol='m3u8_native',
m3u8_id=format_id, fatal=False)
for f in usp_formats:
if f.get('height') and f['height'] > 720:
continue
formats.append(f)
elif transfer_format == 'hds':
formats.extend(self._extract_f4m_formats(
href, programme_id, f4m_id=format_id, fatal=False))
else:
if not service and not supplier and bitrate:
format_id += '-%d' % bitrate
fmt = {
'format_id': format_id,
'filesize': file_size,
}
if kind == 'video':
fmt.update({
'width': width,
'height': height,
'tbr': bitrate,
'vcodec': encoding,
})
else:
fmt.update({
'abr': bitrate,
'acodec': encoding,
'vcodec': 'none',
})
if protocol in ('http', 'https'):
# Direct link
fmt.update({
'url': href,
})
elif protocol == 'rtmp':
application = connection.get('application', 'ondemand')
auth_string = connection.get('authString')
identifier = connection.get('identifier')
server = connection.get('server')
fmt.update({
'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
'play_path': identifier,
'app': '%s?%s' % (application, auth_string),
'page_url': 'http://www.bbc.co.uk',
'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
'rtmp_live': False,
'ext': 'flv',
})
else:
continue
formats.append(fmt)
elif kind == 'captions':
subtitles = self.extract_subtitles(media, programme_id)
return formats, subtitles
def _download_playlist(self, playlist_id):
try:
playlist = self._download_json(
'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
playlist_id, 'Downloading playlist JSON')
version = playlist.get('defaultAvailableVersion')
if version:
smp_config = version['smpConfig']
title = smp_config['title']
description = smp_config['summary']
for item in smp_config['items']:
kind = item['kind']
if kind not in ('programme', 'radioProgramme'):
continue
programme_id = item.get('vpid')
duration = int_or_none(item.get('duration'))
formats, subtitles = self._download_media_selector(programme_id)
return programme_id, title, description, duration, formats, subtitles
except ExtractorError as ee:
if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
raise
# fallback to legacy playlist
return self._process_legacy_playlist(playlist_id)
def _process_legacy_playlist_url(self, url, display_id):
playlist = self._download_legacy_playlist_url(url, display_id)
return self._extract_from_legacy_playlist(playlist, display_id)
def _process_legacy_playlist(self, playlist_id):
return self._process_legacy_playlist_url(
'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id)
def _download_legacy_playlist_url(self, url, playlist_id=None):
return self._download_xml(
url, playlist_id, 'Downloading legacy playlist XML')
def _extract_from_legacy_playlist(self, playlist, playlist_id):
no_items = playlist.find('./{%s}noItems' % self._EMP_PLAYLIST_NS)
if no_items is not None:
reason = no_items.get('reason')
if reason == 'preAvailability':
msg = 'Episode %s is not yet available' % playlist_id
elif reason == 'postAvailability':
msg = 'Episode %s is no longer available' % playlist_id
elif reason == 'noMedia':
msg = 'Episode %s is not currently available' % playlist_id
else:
msg = 'Episode %s is not available: %s' % (playlist_id, reason)
raise ExtractorError(msg, expected=True)
for item in self._extract_items(playlist):
kind = item.get('kind')
if kind not in ('programme', 'radioProgramme'):
continue
title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text
description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS)
description = description_el.text if description_el is not None else None
def get_programme_id(item):
def get_from_attributes(item):
for p in ('identifier', 'group'):
value = item.get(p)
if value and re.match(r'^[pb][\da-z]{7}$', value):
return value
				# check the item's own attributes first, then fall back to its mediator
				programme_id = get_from_attributes(item)
				if programme_id:
					return programme_id
mediator = item.find('./{%s}mediator' % self._EMP_PLAYLIST_NS)
if mediator is not None:
return get_from_attributes(mediator)
programme_id = get_programme_id(item)
duration = int_or_none(item.get('duration'))
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
else:
formats, subtitles = self._process_media_selector(item, playlist_id)
programme_id = playlist_id
return programme_id, title, description, duration, formats, subtitles
def _real_extract(self, url):
group_id = self._match_id(url)
webpage = self._download_webpage(url, group_id, 'Downloading video page')
error = self._search_regex(
r'<div\b[^>]+\bclass=["\']smp__message delta["\'][^>]*>([^<]+)<',
webpage, 'error', default=None)
if error:
raise ExtractorError(error, expected=True)
programme_id = None
duration = None
tviplayer = self._search_regex(
r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
webpage, 'player', default=None)
if tviplayer:
player = self._parse_json(tviplayer, group_id).get('player', {})
duration = int_or_none(player.get('duration'))
programme_id = player.get('vpid')
if not programme_id:
programme_id = self._search_regex(
r'"vpid"\s*:\s*"(%s)"' % self._ID_REGEX, webpage, 'vpid', fatal=False, default=None)
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
title = self._og_search_title(webpage, default=None) or self._html_search_regex(
(r'<h2[^>]+id="parent-title"[^>]*>(.+?)</h2>',
r'<div[^>]+class="info"[^>]*>\s*<h1>(.+?)</h1>'), webpage, 'title')
description = self._search_regex(
(r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
r'<div[^>]+class="info_+synopsis"[^>]*>([^<]+)</div>'),
webpage, 'description', default=None)
if not description:
description = self._html_search_meta('description', webpage)
else:
programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
self._sort_formats(formats)
return {
'id': programme_id,
'title': title,
'description': description,
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
class BBCIE(BBCCoUkIE):
IE_NAME = 'bbc'
IE_DESC = 'BBC'
_VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'
_MEDIASELECTOR_URLS = [
		# Provides HQ HLS streams but fails with a geolocation error in some
		# cases, even when the content is not geo-restricted at all
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
# Provides more formats, namely direct mp4 links, but fails on some videos with
# notukerror for non UK (?) users (e.g.
# http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s',
# Provides fewer formats, but works everywhere for everybody (hopefully)
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s',
]
_TESTS = [{
# article with multiple videos embedded with data-playable containing vpids
'url': 'http://www.bbc.com/news/world-europe-32668511',
'info_dict': {
'id': 'world-europe-32668511',
'title': 'Russia stages massive WW2 parade despite Western boycott',
'description': 'md5:00ff61976f6081841f759a08bf78cc9c',
},
'playlist_count': 2,
}, {
# article with multiple videos embedded with data-playable (more videos)
'url': 'http://www.bbc.com/news/business-28299555',
'info_dict': {
'id': 'business-28299555',
'title': 'Farnborough Airshow: Video highlights',
'description': 'BBC reports and video highlights at the Farnborough Airshow.',
},
'playlist_count': 9,
'skip': 'Save time',
}, {
# article with multiple videos embedded with `new SMP()`
# broken
'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
'info_dict': {
'id': '3662a707-0af9-3149-963f-47bea720b460',
'title': 'BUGGER',
},
'playlist_count': 18,
}, {
# single video embedded with data-playable containing vpid
'url': 'http://www.bbc.com/news/world-europe-32041533',
'info_dict': {
'id': 'p02mprgb',
'ext': 'mp4',
'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
'description': 'md5:2868290467291b37feda7863f7a83f54',
'duration': 47,
'timestamp': 1427219242,
'upload_date': '20150324',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# article with single video embedded with data-playable containing XML playlist
# with direct video links as progressiveDownloadUrl (for now these are extracted)
# and playlist with f4m and m3u8 as streamingUrl
'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
'info_dict': {
'id': '150615_telabyad_kentin_cogu',
'ext': 'mp4',
'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde",
'description': 'md5:33a4805a855c9baf7115fcbde57e7025',
'timestamp': 1434397334,
'upload_date': '20150615',
},
'params': {
'skip_download': True,
}
}, {
# single video embedded with data-playable containing XML playlists (regional section)
'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
'info_dict': {
'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
'ext': 'mp4',
'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
'description': 'md5:1525f17448c4ee262b64b8f0c9ce66c8',
'timestamp': 1434713142,
'upload_date': '20150619',
},
'params': {
'skip_download': True,
}
}, {
# single video from video playlist embedded with vxp-playlist-data JSON
'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376',
'info_dict': {
'id': 'p02w6qjc',
'ext': 'mp4',
'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
'duration': 56,
'description': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
},
'params': {
'skip_download': True,
}
}, {
# single video story with digitalData
'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret',
'info_dict': {
'id': 'p02q6gc4',
'ext': 'flv',
'title': 'Sri Lanka’s spicy secret',
'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.',
'timestamp': 1437674293,
'upload_date': '20150723',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# single video story without digitalData
'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star',
'info_dict': {
'id': 'p018zqqg',
'ext': 'mp4',
'title': 'Hyundai Santa Fe Sport: Rock star',
'description': 'md5:b042a26142c4154a6e472933cf20793d',
'timestamp': 1415867444,
'upload_date': '20141113',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# single video embedded with Morph
'url': 'http://www.bbc.co.uk/sport/live/olympics/36895975',
'info_dict': {
'id': 'p041vhd0',
'ext': 'mp4',
'title': "Nigeria v Japan - Men's First Round",
'description': 'Live coverage of the first round from Group B at the Amazonia Arena.',
'duration': 7980,
'uploader': 'BBC Sport',
'uploader_id': 'bbc_sport',
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'Georestricted to UK',
}, {
# single video with playlist.sxml URL in playlist param
'url': 'http://www.bbc.com/sport/0/football/33653409',
'info_dict': {
'id': 'p02xycnp',
'ext': 'mp4',
'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.',
'duration': 140,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# article with multiple videos embedded with playlist.sxml in playlist param
'url': 'http://www.bbc.com/sport/0/football/34475836',
'info_dict': {
'id': '34475836',
'title': 'Jurgen Klopp: Furious football from a witty and winning coach',
'description': 'Fast-paced football, wit, wisdom and a ready smile - why Liverpool fans should come to love new boss Jurgen Klopp.',
},
'playlist_count': 3,
}, {
# school report article with single video
'url': 'http://www.bbc.co.uk/schoolreport/35744779',
'info_dict': {
'id': '35744779',
'title': 'School which breaks down barriers in Jerusalem',
},
'playlist_count': 1,
}, {
# single video with playlist URL from weather section
'url': 'http://www.bbc.com/weather/features/33601775',
'only_matching': True,
}, {
# custom redirection to www.bbc.com
'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
'only_matching': True,
}, {
# single video article embedded with data-media-vpid
'url': 'http://www.bbc.co.uk/sport/rowing/35908187',
'only_matching': True,
}, {
'url': 'https://www.bbc.co.uk/bbcthree/clip/73d0bbd0-abc3-4cea-b3c0-cdae21905eb1',
'info_dict': {
'id': 'p06556y7',
'ext': 'mp4',
'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
'description': 'md5:4b7dfd063d5a789a1512e99662be3ddd',
},
'params': {
'skip_download': True,
}
}, {
# window.__PRELOADED_STATE__
'url': 'https://www.bbc.co.uk/radio/play/b0b9z4yl',
'info_dict': {
'id': 'b0b9z4vz',
'ext': 'mp4',
'title': 'Prom 6: An American in Paris and Turangalila',
'description': 'md5:51cf7d6f5c8553f197e58203bc78dff8',
'uploader': 'Radio 3',
'uploader_id': 'bbc_radio_three',
},
}, {
'url': 'http://www.bbc.co.uk/learningenglish/chinese/features/lingohack/ep-181227',
'info_dict': {
'id': 'p06w9tws',
'ext': 'mp4',
'title': 'md5:2fabf12a726603193a2879a055f72514',
'description': 'Learn English words and phrases from this story',
},
'add_ie': [BBCCoUkIE.ie_key()],
}]
@classmethod
def suitable(cls, url):
EXCLUDE_IE = (BBCCoUkIE, BBCCoUkArticleIE, BBCCoUkIPlayerPlaylistIE, BBCCoUkPlaylistIE)
return (False if any(ie.suitable(url) for ie in EXCLUDE_IE)
else super(BBCIE, cls).suitable(url))
def _extract_from_media_meta(self, media_meta, video_id):
# Direct links to media in media metadata (e.g.
# http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
# TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml
source_files = media_meta.get('sourceFiles')
if source_files:
return [{
'url': f['url'],
'format_id': format_id,
'ext': f.get('encoding'),
'tbr': float_or_none(f.get('bitrate'), 1000),
'filesize': int_or_none(f.get('filesize')),
} for format_id, f in source_files.items() if f.get('url')], []
programme_id = media_meta.get('externalId')
if programme_id:
return self._download_media_selector(programme_id)
# Process playlist.sxml as legacy playlist
href = media_meta.get('href')
if href:
playlist = self._download_legacy_playlist_url(href)
_, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id)
return formats, subtitles
return [], []
def _extract_from_playlist_sxml(self, url, playlist_id, timestamp):
programme_id, title, description, duration, formats, subtitles = \
self._process_legacy_playlist_url(url, playlist_id)
self._sort_formats(formats)
return {
'id': programme_id,
'title': title,
'description': description,
'duration': duration,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
json_ld_info = self._search_json_ld(webpage, playlist_id, default={})
timestamp = json_ld_info.get('timestamp')
playlist_title = json_ld_info.get('title')
if not playlist_title:
playlist_title = self._og_search_title(
webpage, default=None) or self._html_search_regex(
r'<title>(.+?)</title>', webpage, 'playlist title', default=None)
if playlist_title:
playlist_title = re.sub(r'(.+)\s*-\s*BBC.*?$', r'\1', playlist_title).strip()
playlist_description = json_ld_info.get(
'description') or self._og_search_description(webpage, default=None)
if not timestamp:
timestamp = parse_iso8601(self._search_regex(
[r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
r'itemprop="datePublished"[^>]+datetime="([^"]+)"',
r'"datePublished":\s*"([^"]+)'],
webpage, 'date', default=None))
entries = []
# article with multiple videos embedded with playlist.sxml (e.g.
# http://www.bbc.com/sport/0/football/34475836)
playlists = re.findall(r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage)
playlists.extend(re.findall(r'data-media-id="([^"]+/playlist\.sxml)"', webpage))
if playlists:
entries = [
self._extract_from_playlist_sxml(playlist_url, playlist_id, timestamp)
for playlist_url in playlists]
# news article with multiple videos embedded with data-playable
data_playables = re.findall(r'data-playable=(["\'])({.+?})\1', webpage)
if data_playables:
for _, data_playable_json in data_playables:
data_playable = self._parse_json(
unescapeHTML(data_playable_json), playlist_id, fatal=False)
if not data_playable:
continue
settings = data_playable.get('settings', {})
if settings:
# data-playable with video vpid in settings.playlistObject.items (e.g.
# http://www.bbc.com/news/world-us-canada-34473351)
playlist_object = settings.get('playlistObject', {})
if playlist_object:
items = playlist_object.get('items')
if items and isinstance(items, list):
title = playlist_object['title']
description = playlist_object.get('summary')
duration = int_or_none(items[0].get('duration'))
programme_id = items[0].get('vpid')
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
entries.append({
'id': programme_id,
'title': title,
'description': description,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
})
else:
# data-playable without vpid but with a playlist.sxml URLs
# in otherSettings.playlist (e.g.
# http://www.bbc.com/turkce/multimedya/2015/10/151010_vid_ankara_patlama_ani)
playlist = data_playable.get('otherSettings', {}).get('playlist', {})
if playlist:
entry = None
for key in ('streaming', 'progressiveDownload'):
playlist_url = playlist.get('%sUrl' % key)
if not playlist_url:
continue
try:
info = self._extract_from_playlist_sxml(
playlist_url, playlist_id, timestamp)
if not entry:
entry = info
else:
entry['title'] = info['title']
entry['formats'].extend(info['formats'])
except Exception as e:
# Some playlist URL may fail with 500, at the same time
# the other one may work fine (e.g.
# http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 500:
continue
raise
if entry:
self._sort_formats(entry['formats'])
entries.append(entry)
if entries:
return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
# http://www.bbc.co.uk/learningenglish/chinese/features/lingohack/ep-181227
group_id = self._search_regex(
r'<div[^>]+\bclass=["\']video["\'][^>]+\bdata-pid=["\'](%s)' % self._ID_REGEX,
webpage, 'group id', default=None)
		if group_id:
return self.url_result(
'https://www.bbc.co.uk/programmes/%s' % group_id,
ie=BBCCoUkIE.ie_key())
# single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
programme_id = self._search_regex(
[r'data-(?:video-player|media)-vpid="(%s)"' % self._ID_REGEX,
r'<param[^>]+name="externalIdentifier"[^>]+value="(%s)"' % self._ID_REGEX,
r'videoId\s*:\s*["\'](%s)["\']' % self._ID_REGEX],
webpage, 'vpid', default=None)
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
# digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star)
digital_data = self._parse_json(
self._search_regex(
r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'),
programme_id, fatal=False)
page_info = digital_data.get('page', {}).get('pageInfo', {})
title = page_info.get('pageName') or self._og_search_title(webpage)
description = page_info.get('description') or self._og_search_description(webpage)
timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp
return {
'id': programme_id,
'title': title,
'description': description,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
}
# Morph based embed (e.g. http://www.bbc.co.uk/sport/live/olympics/36895975)
		# Several setPayload calls may be present, but the video seems to
		# always be related to the first one
morph_payload = self._parse_json(
self._search_regex(
r'Morph\.setPayload\([^,]+,\s*({.+?})\);',
webpage, 'morph payload', default='{}'),
playlist_id, fatal=False)
if morph_payload:
components = try_get(morph_payload, lambda x: x['body']['components'], list) or []
for component in components:
if not isinstance(component, dict):
continue
lead_media = try_get(component, lambda x: x['props']['leadMedia'], dict)
if not lead_media:
continue
identifiers = lead_media.get('identifiers')
if not identifiers or not isinstance(identifiers, dict):
continue
programme_id = identifiers.get('vpid') or identifiers.get('playablePid')
if not programme_id:
continue
title = lead_media.get('title') or self._og_search_title(webpage)
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
description = lead_media.get('summary')
uploader = lead_media.get('masterBrand')
uploader_id = lead_media.get('mid')
duration = None
duration_d = lead_media.get('duration')
if isinstance(duration_d, dict):
duration = parse_duration(dict_get(
duration_d, ('rawDuration', 'formattedDuration', 'spokenDuration')))
return {
'id': programme_id,
'title': title,
'description': description,
'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
'formats': formats,
'subtitles': subtitles,
}
preload_state = self._parse_json(self._search_regex(
r'window\.__PRELOADED_STATE__\s*=\s*({.+?});', webpage,
'preload state', default='{}'), playlist_id, fatal=False)
if preload_state:
current_programme = preload_state.get('programmes', {}).get('current') or {}
programme_id = current_programme.get('id')
if current_programme and programme_id and current_programme.get('type') == 'playable_item':
title = current_programme.get('titles', {}).get('tertiary') or playlist_title
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
synopses = current_programme.get('synopses') or {}
network = current_programme.get('network') or {}
duration = int_or_none(
current_programme.get('duration', {}).get('value'))
thumbnail = None
image_url = current_programme.get('image_url')
if image_url:
thumbnail = image_url.replace('{recipe}', '1920x1920')
return {
'id': programme_id,
'title': title,
'description': dict_get(synopses, ('long', 'medium', 'short')),
'thumbnail': thumbnail,
'duration': duration,
'uploader': network.get('short_title'),
'uploader_id': network.get('id'),
'formats': formats,
'subtitles': subtitles,
}
bbc3_config = self._parse_json(
self._search_regex(
r'(?s)bbcthreeConfig\s*=\s*({.+?})\s*;\s*<', webpage,
'bbcthree config', default='{}'),
playlist_id, transform_source=js_to_json, fatal=False)
if bbc3_config:
bbc3_playlist = try_get(
bbc3_config, lambda x: x['payload']['content']['bbcMedia']['playlist'],
dict)
if bbc3_playlist:
playlist_title = bbc3_playlist.get('title') or playlist_title
thumbnail = bbc3_playlist.get('holdingImageURL')
entries = []
for bbc3_item in bbc3_playlist['items']:
programme_id = bbc3_item.get('versionID')
if not programme_id:
continue
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
entries.append({
'id': programme_id,
'title': playlist_title,
'thumbnail': thumbnail,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
})
return self.playlist_result(
entries, playlist_id, playlist_title, playlist_description)
def extract_all(pattern):
return list(filter(None, map(
lambda s: self._parse_json(s, playlist_id, fatal=False),
re.findall(pattern, webpage))))
# Multiple video article (e.g.
# http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+%s(?:\b[^"]+)?' % self._ID_REGEX
entries = []
for match in extract_all(r'new\s+SMP\(({.+?})\)'):
embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
if embed_url and re.match(EMBED_URL, embed_url):
entries.append(embed_url)
entries.extend(re.findall(
r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage))
if entries:
return self.playlist_result(
[self.url_result(entry_, 'BBCCoUk') for entry_ in entries],
playlist_id, playlist_title, playlist_description)
# Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511)
medias = extract_all(r"data-media-meta='({[^']+})'")
if not medias:
# Single video article (e.g. http://www.bbc.com/news/video_and_audio/international)
media_asset = self._search_regex(
r'mediaAssetPage\.init\(\s*({.+?}), "/',
webpage, 'media asset', default=None)
if media_asset:
media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False)
medias = []
for video in media_asset_page.get('videos', {}).values():
medias.extend(video.values())
if not medias:
# Multiple video playlist with single `now playing` entry (e.g.
# http://www.bbc.com/news/video_and_audio/must_see/33767813)
vxp_playlist = self._parse_json(
self._search_regex(
r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>',
webpage, 'playlist data'),
playlist_id)
playlist_medias = []
for item in vxp_playlist:
media = item.get('media')
if not media:
continue
playlist_medias.append(media)
# Download single video if found media with asset id matching the video id from URL
if item.get('advert', {}).get('assetId') == playlist_id:
medias = [media]
break
# Fallback to the whole playlist
if not medias:
medias = playlist_medias
entries = []
for num, media_meta in enumerate(medias, start=1):
formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
if not formats:
continue
self._sort_formats(formats)
video_id = media_meta.get('externalId')
if not video_id:
video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num)
title = media_meta.get('caption')
if not title:
title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num)
duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration'))
images = []
for image in media_meta.get('images', {}).values():
images.extend(image.values())
if 'image' in media_meta:
images.append(media_meta['image'])
thumbnails = [{
'url': image.get('href'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in images]
entries.append({
'id': video_id,
'title': title,
'thumbnails': thumbnails,
'duration': duration,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
})
return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
class BBCCoUkArticleIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)'
IE_NAME = 'bbc.co.uk:article'
IE_DESC = 'BBC articles'
_TEST = {
'url': 'http://www.bbc.co.uk/programmes/articles/3jNQLTMrPlYGTBn0WV6M2MS/not-your-typical-role-model-ada-lovelace-the-19th-century-programmer',
'info_dict': {
'id': '3jNQLTMrPlYGTBn0WV6M2MS',
'title': 'Calculating Ada: The Countess of Computing - Not your typical role model: Ada Lovelace the 19th century programmer - BBC Four',
'description': 'Hannah Fry reveals some of her surprising discoveries about Ada Lovelace during filming.',
},
'playlist_count': 4,
'add_ie': ['BBCCoUk'],
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage).strip()
entries = [self.url_result(programme_url) for programme_url in re.findall(
r'<div[^>]+typeof="Clip"[^>]+resource="([^"]+)"', webpage)]
return self.playlist_result(entries, playlist_id, title, description)
class BBCCoUkPlaylistBaseIE(InfoExtractor):
def _entries(self, webpage, url, playlist_id):
single_page = 'page' in compat_urlparse.parse_qs(
compat_urlparse.urlparse(url).query)
for page_num in itertools.count(2):
for video_id in re.findall(
self._VIDEO_ID_TEMPLATE % BBCCoUkIE._ID_REGEX, webpage):
yield self.url_result(
self._URL_TEMPLATE % video_id, BBCCoUkIE.ie_key())
if single_page:
return
next_page = self._search_regex(
r'<li[^>]+class=(["\'])pagination_+next\1[^>]*><a[^>]+href=(["\'])(?P<url>(?:(?!\2).)+)\2',
webpage, 'next page url', default=None, group='url')
if not next_page:
break
webpage = self._download_webpage(
compat_urlparse.urljoin(url, next_page), playlist_id,
'Downloading page %d' % page_num, page_num)
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title, description = self._extract_title_and_description(webpage)
return self.playlist_result(
self._entries(webpage, url, playlist_id),
playlist_id, title, description)
class BBCCoUkIPlayerPlaylistIE(BBCCoUkPlaylistBaseIE):
IE_NAME = 'bbc.co.uk:iplayer:playlist'
_VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/iplayer/(?:episodes|group)/(?P<id>%s)' % BBCCoUkIE._ID_REGEX
_URL_TEMPLATE = 'http://www.bbc.co.uk/iplayer/episode/%s'
_VIDEO_ID_TEMPLATE = r'data-ip-id=["\'](%s)'
_TESTS = [{
'url': 'http://www.bbc.co.uk/iplayer/episodes/b05rcz9v',
'info_dict': {
'id': 'b05rcz9v',
'title': 'The Disappearance',
'description': 'French thriller serial about a missing teenager.',
},
'playlist_mincount': 6,
'skip': 'This programme is not currently available on BBC iPlayer',
}, {
# Available for over a year unlike 30 days for most other programmes
'url': 'http://www.bbc.co.uk/iplayer/group/p02tcc32',
'info_dict': {
'id': 'p02tcc32',
'title': 'Bohemian Icons',
'description': 'md5:683e901041b2fe9ba596f2ab04c4dbe7',
},
'playlist_mincount': 10,
}]
def _extract_title_and_description(self, webpage):
title = self._search_regex(r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False)
description = self._search_regex(
r'<p[^>]+class=(["\'])subtitle\1[^>]*>(?P<value>[^<]+)</p>',
webpage, 'description', fatal=False, group='value')
return title, description
class BBCCoUkPlaylistIE(BBCCoUkPlaylistBaseIE):
IE_NAME = 'bbc.co.uk:playlist'
_VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/programmes/(?P<id>%s)/(?:episodes|broadcasts|clips)' % BBCCoUkIE._ID_REGEX
_URL_TEMPLATE = 'http://www.bbc.co.uk/programmes/%s'
_VIDEO_ID_TEMPLATE = r'data-pid=["\'](%s)'
_TESTS = [{
'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/clips',
'info_dict': {
'id': 'b05rcz9v',
'title': 'The Disappearance - Clips - BBC Four',
'description': 'French thriller serial about a missing teenager.',
},
'playlist_mincount': 7,
}, {
# multipage playlist, explicit page
'url': 'http://www.bbc.co.uk/programmes/b00mfl7n/clips?page=1',
'info_dict': {
'id': 'b00mfl7n',
'title': 'Frozen Planet - Clips - BBC One',
'description': 'md5:65dcbf591ae628dafe32aa6c4a4a0d8c',
},
'playlist_mincount': 24,
}, {
# multipage playlist, all pages
'url': 'http://www.bbc.co.uk/programmes/b00mfl7n/clips',
'info_dict': {
'id': 'b00mfl7n',
'title': 'Frozen Planet - Clips - BBC One',
'description': 'md5:65dcbf591ae628dafe32aa6c4a4a0d8c',
},
'playlist_mincount': 142,
}, {
'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/broadcasts/2016/06',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/clips',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/programmes/b055jkys/episodes/player',
'only_matching': True,
}]
def _extract_title_and_description(self, webpage):
title = self._og_search_title(webpage, fatal=False)
description = self._og_search_description(webpage)
return title, description
| valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/bbc.py | Python | gpl-3.0 | 46,947 |
def create_nodes(femmesh):
# nodes
femmesh.addNode(0.0, 500.0, 500.0, 1)
femmesh.addNode(8000.0, 500.0, 500.0, 2)
femmesh.addNode(148.14814814814792, 500.0, 500.0, 3)
femmesh.addNode(296.29629629629585, 500.0, 500.0, 4)
femmesh.addNode(444.4444444444438, 500.0, 500.0, 5)
femmesh.addNode(592.5925925925918, 500.0, 500.0, 6)
femmesh.addNode(740.7407407407396, 500.0, 500.0, 7)
femmesh.addNode(888.8888888888874, 500.0, 500.0, 8)
femmesh.addNode(1037.0370370370354, 500.0, 500.0, 9)
femmesh.addNode(1185.1851851851832, 500.0, 500.0, 10)
femmesh.addNode(1333.333333333331, 500.0, 500.0, 11)
femmesh.addNode(1481.4814814814792, 500.0, 500.0, 12)
femmesh.addNode(1629.6296296296275, 500.0, 500.0, 13)
femmesh.addNode(1777.7777777777753, 500.0, 500.0, 14)
femmesh.addNode(1925.9259259259236, 500.0, 500.0, 15)
femmesh.addNode(2074.0740740740716, 500.0, 500.0, 16)
femmesh.addNode(2222.22222222222, 500.0, 500.0, 17)
femmesh.addNode(2370.370370370368, 500.0, 500.0, 18)
femmesh.addNode(2518.5185185185155, 500.0, 500.0, 19)
femmesh.addNode(2666.666666666663, 500.0, 500.0, 20)
femmesh.addNode(2814.8148148148107, 500.0, 500.0, 21)
femmesh.addNode(2962.962962962958, 500.0, 500.0, 22)
femmesh.addNode(3111.1111111111054, 500.0, 500.0, 23)
femmesh.addNode(3259.259259259253, 500.0, 500.0, 24)
femmesh.addNode(3407.4074074074006, 500.0, 500.0, 25)
femmesh.addNode(3555.555555555548, 500.0, 500.0, 26)
femmesh.addNode(3703.7037037036957, 500.0, 500.0, 27)
femmesh.addNode(3851.851851851843, 500.0, 500.0, 28)
femmesh.addNode(3999.9999999999905, 500.0, 500.0, 29)
femmesh.addNode(4148.148148148138, 500.0, 500.0, 30)
femmesh.addNode(4296.296296296286, 500.0, 500.0, 31)
femmesh.addNode(4444.4444444444325, 500.0, 500.0, 32)
femmesh.addNode(4592.59259259258, 500.0, 500.0, 33)
femmesh.addNode(4740.740740740728, 500.0, 500.0, 34)
femmesh.addNode(4888.888888888877, 500.0, 500.0, 35)
femmesh.addNode(5037.037037037026, 500.0, 500.0, 36)
femmesh.addNode(5185.185185185173, 500.0, 500.0, 37)
femmesh.addNode(5333.333333333322, 500.0, 500.0, 38)
femmesh.addNode(5481.481481481471, 500.0, 500.0, 39)
femmesh.addNode(5629.6296296296205, 500.0, 500.0, 40)
femmesh.addNode(5777.777777777769, 500.0, 500.0, 41)
femmesh.addNode(5925.925925925918, 500.0, 500.0, 42)
femmesh.addNode(6074.074074074067, 500.0, 500.0, 43)
femmesh.addNode(6222.222222222214, 500.0, 500.0, 44)
femmesh.addNode(6370.370370370363, 500.0, 500.0, 45)
femmesh.addNode(6518.518518518513, 500.0, 500.0, 46)
femmesh.addNode(6666.6666666666615, 500.0, 500.0, 47)
femmesh.addNode(6814.81481481481, 500.0, 500.0, 48)
femmesh.addNode(6962.962962962959, 500.0, 500.0, 49)
femmesh.addNode(7111.111111111108, 500.0, 500.0, 50)
femmesh.addNode(7259.259259259256, 500.0, 500.0, 51)
femmesh.addNode(7407.407407407406, 500.0, 500.0, 52)
femmesh.addNode(7555.555555555554, 500.0, 500.0, 53)
femmesh.addNode(7703.703703703703, 500.0, 500.0, 54)
femmesh.addNode(7851.851851851851, 500.0, 500.0, 55)
return True
def create_elements(femmesh):
# elements
femmesh.addEdge([1, 3], 1)
femmesh.addEdge([3, 4], 2)
femmesh.addEdge([4, 5], 3)
femmesh.addEdge([5, 6], 4)
femmesh.addEdge([6, 7], 5)
femmesh.addEdge([7, 8], 6)
femmesh.addEdge([8, 9], 7)
femmesh.addEdge([9, 10], 8)
femmesh.addEdge([10, 11], 9)
femmesh.addEdge([11, 12], 10)
femmesh.addEdge([12, 13], 11)
femmesh.addEdge([13, 14], 12)
femmesh.addEdge([14, 15], 13)
femmesh.addEdge([15, 16], 14)
femmesh.addEdge([16, 17], 15)
femmesh.addEdge([17, 18], 16)
femmesh.addEdge([18, 19], 17)
femmesh.addEdge([19, 20], 18)
femmesh.addEdge([20, 21], 19)
femmesh.addEdge([21, 22], 20)
femmesh.addEdge([22, 23], 21)
femmesh.addEdge([23, 24], 22)
femmesh.addEdge([24, 25], 23)
femmesh.addEdge([25, 26], 24)
femmesh.addEdge([26, 27], 25)
femmesh.addEdge([27, 28], 26)
femmesh.addEdge([28, 29], 27)
femmesh.addEdge([29, 30], 28)
femmesh.addEdge([30, 31], 29)
femmesh.addEdge([31, 32], 30)
femmesh.addEdge([32, 33], 31)
femmesh.addEdge([33, 34], 32)
femmesh.addEdge([34, 35], 33)
femmesh.addEdge([35, 36], 34)
femmesh.addEdge([36, 37], 35)
femmesh.addEdge([37, 38], 36)
femmesh.addEdge([38, 39], 37)
femmesh.addEdge([39, 40], 38)
femmesh.addEdge([40, 41], 39)
femmesh.addEdge([41, 42], 40)
femmesh.addEdge([42, 43], 41)
femmesh.addEdge([43, 44], 42)
femmesh.addEdge([44, 45], 43)
femmesh.addEdge([45, 46], 44)
femmesh.addEdge([46, 47], 45)
femmesh.addEdge([47, 48], 46)
femmesh.addEdge([48, 49], 47)
femmesh.addEdge([49, 50], 48)
femmesh.addEdge([50, 51], 49)
femmesh.addEdge([51, 52], 50)
femmesh.addEdge([52, 53], 51)
femmesh.addEdge([53, 54], 52)
femmesh.addEdge([54, 55], 53)
femmesh.addEdge([55, 2], 54)
return True
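# Minimal usage sketch (assumption: run inside FreeCAD, where the Fem module
# provides Fem.FemMesh with the addNode/addEdge methods used above):
#     import Fem
#     femmesh = Fem.FemMesh()
#     create_nodes(femmesh)
#     create_elements(femmesh)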
| sanguinariojoe/FreeCAD | src/Mod/Fem/femexamples/meshes/mesh_canticcx_seg2.py | Python | lgpl-2.1 | 5,045 |
#!/usr/bin/env python
import pandas as pd
import requests
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import random
import time
df = pd.read_csv("metadata.csv")
# Don't download research papers with a non-commercial license ("no-cc"),
# and keep only rows that have a PMC id.
df = df.loc[df['license'] != "no-cc"]
df = df.loc[df["pmcid"].notnull()]
# For https documents
urls = []
for i in range(len(df["url"])) :
try:
if "https://www.ncbi.nlm.nih.gov/pmc/articles/" in df["url"][i] :
urls.append((df["cord_uid"][i],df["url"][i]))
except:
        pass  # skip rows with a missing or malformed URL (e.g. NaN)
print(len(urls))
count=0
#Download https documents from NCBI
for url in urls:
count+=1
if count%100 == 0:
        print(str(count) + " Research Papers Downloaded")
if count > 4803:
time.sleep(random.randint(1,2))
try:
folder_location = './store_https'
if os.path.exists("/usr/local/google/home/madhuparnab/Downloads/551982_1230614_bundle_archive/store_https/"+url[0]+".pdf") :
continue
if not os.path.exists(folder_location):os.mkdir(folder_location)
response = requests.get(url[1])
soup= BeautifulSoup(response.text, "html.parser")
for link in soup.select("a[href$='.pdf']"):
pdf_part = ""
if link["href"][0:4] == "/pmc":
pdf_part = link["href"]
else:
continue
l = "https://www.ncbi.nlm.nih.gov" + pdf_part
print(l)
filename = os.path.join(folder_location,url[0])
filename = filename + ".pdf"
os.system("curl " + l + " --output " + filename);
except:
print("error")
#For http Documents
urls = []
for i in range(len(df["url"])) :
try:
        if "https" not in df["url"][i]:
urls.append((df["cord_uid"][i],df["url"][i]))
except:
        pass  # skip rows with a missing URL (e.g. NaN)
print(len(urls))
#download http documents from medrxiv
for url in urls:
if "medrxiv" in url[1]:
print(url[1])
folder_location = './store'
if not os.path.exists(folder_location):os.mkdir(folder_location)
response = requests.get(url[1])
print(response)
soup= BeautifulSoup(response.text, "html.parser")
for link in soup.select("a[href$='.pdf']"):
print(link)
print(link["href"])
l = "https://www.medrxiv.org/" + link["href"]
print(l)
filename = os.path.join(folder_location,url[0])
filename = filename + ".pdf"
with open(filename, 'wb') as f:
                f.write(requests.get(l).content)
| googleinterns/cloudsearch-ai | helper_scripts/downloadResearchPapers.py | Python | apache-2.0 | 2,607 |
#!/usr/bin/env python
#---------------------------------------------------------------------------
# Copyright 2012-2019 The Open Source Electronic Health Record Alliance
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
from __future__ import print_function
from __future__ import with_statement
from builtins import object
import codecs
import sys
import os
import subprocess
import shutil
import glob
import argparse
# add the current to sys.path
SCRIPTS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPTS_DIR)
from VistATestClient import VistATestClientFactory, createTestClientArgParser
from VistARoutineExport import VistARoutineExport
from VistARoutineImport import VistARoutineImport
from VistAGlobalExport import VistAGlobalExport
from LoggerManager import logger, initConsoleLogging, initFileLogging
from VistATaskmanUtil import VistATaskmanUtil
from MCompReposReadMeGenerator import MCompReposReadMeGenerator
from GitUtils import switchBranch, getStatus
""" List of routine names that are excluded from export process """
ROUTINE_EXTRACT_EXCLUDE_LIST = (
"ZGO", "ZGI", "xobw.*", "%*",
"CHK2LEV", "CHKOP", "GENDASH", "GENOUT",
"GETPASS", "GTMHELP", "GTMHLPLD", "LOADOP",
"LOADVX", "MSG", "PINENTRY", "TTTGEN",
"TTTSCAN"
)
""" List of routine names that are to be included again after the above has excluded a set of them """
ROUTINE_EXTRACT_EXCEPTION_LIST = (
"%ut*",
)
""" Extract routines/globals from a VistA instance and import
to the git repository
"""
class VistADataExtractor(object):
def __init__(self, vistARepoDir, outputResultDir,
outputLogDir, routineOutDir=None,
gitBranch=None, generateReadMe=False,
serialExport=False):
assert os.path.exists(vistARepoDir)
assert os.path.exists(outputResultDir)
assert os.path.exists(outputLogDir)
self._vistARepoDir = vistARepoDir
self._outputLogDir = outputLogDir
self._outputResultDir = outputResultDir
self._packagesDir = os.path.join(self._vistARepoDir, "Packages")
assert os.path.exists(self._packagesDir)
self._packagesCSV = os.path.normpath(os.path.join(self._vistARepoDir,
"Packages.csv"))
assert os.path.exists(self._packagesCSV)
self._routineOutputFile = os.path.join(self._outputResultDir, "Routines.ro")
self._globalOutputDir = os.path.join(self._outputResultDir, "Globals")
if not os.path.exists(self._globalOutputDir):
os.mkdir(self._globalOutputDir)
if routineOutDir:
assert os.path.exists(routineOutDir)
self._routineOutDir = routineOutDir
self._generateReadMe = generateReadMe
self._gitBranch = gitBranch
self._serialExport = serialExport
def extractData(self, vistATestClient):
self.__setupLogging__(vistATestClient)
self.__switchBranch__()
self.__stopTaskman__(vistATestClient)
self.__extractRoutines__(vistATestClient)
self.__importZGORoutine__(vistATestClient)
self.__exportAllGlobals__(vistATestClient)
self.__chmodGlobalOutput__()
self.__removePackagesTree__()
self.__unpackRoutines__()
self.__copyAllGlobals__()
self.__splitGlobalFiles__()
self.__populatePackageFiles__()
self.__generatePackageReadMes__()
self.__reportGitStatus__()
self.__cleanup__()
def __setupLogging__(self, vistATestClient):
DEFAULT_LOGGING_FILENAME = "VistADataExtractor.log"
DEFAULT_EXPECT_LOG_FILENAME = "VistAPExpect.log"
vistATestClient.setLogFile(os.path.join(self._outputLogDir,
DEFAULT_EXPECT_LOG_FILENAME))
initFileLogging(os.path.join(self._outputLogDir,
DEFAULT_LOGGING_FILENAME))
def __stopTaskman__(self, vistATestClient):
connection = vistATestClient.getConnection()
connection.send("S DUZ=.5 D ^XUP\n")
connection.expect("OPTION")
connection.send("\n")
vistATestClient.waitForPrompt()
taskmanUtil = VistATaskmanUtil()
taskmanUtil.shutdownAllTasks(vistATestClient)
def __extractRoutines__(self, vistATestClient):
# do not export ZGO, ZGI and xobw.* routines for now
excludeList = ROUTINE_EXTRACT_EXCLUDE_LIST
exceptionList = ROUTINE_EXTRACT_EXCEPTION_LIST
vistARoutineExport = VistARoutineExport()
logger.info("Extracting All Routines from VistA instance to %s" %
self._routineOutputFile)
vistARoutineExport.exportAllRoutines(vistATestClient,
self._routineOutputFile,
excludeList,
exceptionList)
def __importZGORoutine__(self, vistATestClient):
logger.info("Import ZGO routine to VistA instance")
from PackRO import pack
zgoOutFile = os.path.join(self._outputResultDir, "ZGO.ro")
zgoRoutine = os.path.join(SCRIPTS_DIR, "ZGO.m")
assert os.path.exists(zgoRoutine)
pack([zgoRoutine], open(zgoOutFile, 'w'))
vistARoutineImport = VistARoutineImport()
vistARoutineImport.importRoutines(vistATestClient, zgoOutFile,
self._routineOutDir)
def __exportAllGlobals__(self, vistATestClient):
""" remove all the zwr files first """
logger.info("Remove all zwr files under %s" % self._globalOutputDir)
for file in glob.glob(os.path.join(self._globalOutputDir, "*.zwr")):
os.remove(file)
logger.info("Exporting all globals from VistA instance")
vistAGlobalExport = VistAGlobalExport()
vistAGlobalExport.exportAllGlobals(vistATestClient, self._globalOutputDir, self._serialExport)
def __removePackagesTree__(self):
logger.info("Removing all files under %s" % self._packagesDir)
for dirEntry in os.listdir(self._packagesDir):
if dirEntry == ".gitattributes": # ignore the .gitattributes
continue
fullPath = os.path.join(self._packagesDir, dirEntry)
if os.path.isdir(fullPath):
shutil.rmtree(fullPath)
else:
os.remove(fullPath)
def unpackRoutines(self, routinesOutputFile, outputDir):
from UnpackRO import unpack
assert os.path.exists(routinesOutputFile)
assert os.path.exists(outputDir)
absOutDir = os.path.abspath(outputDir)
logfile = os.path.join(self._outputLogDir, "unpackro.log")
logger.info("Unpack routines from %s to %s" %
(routinesOutputFile, outputDir))
with codecs.open(routinesOutputFile, 'r', encoding='ISO-8859-1', errors='ignore') as routineFile: # open as txt
with open(logfile, 'w') as logFile:
unpack(routineFile, out=logFile, odir=outputDir)
def __unpackRoutines__(self):
self.unpackRoutines(self._routineOutputFile, self._packagesDir)
def __copyAllGlobals__(self):
logger.info("Copying all files from %s to %s" %
(self._globalOutputDir, self._packagesDir))
zwrFiles = glob.glob(os.path.join(self._globalOutputDir, "*.zwr"))
for zwrFile in zwrFiles:
logger.debug("Copying %s to %s" % (zwrFile, self._packagesDir))
shutil.copy2(zwrFile, self._packagesDir)
def __splitGlobalFiles__(self):
from SplitZWR import splitZWR
zwrFiles = glob.glob(os.path.join(self._packagesDir, "*.zwr"))
maxSize = 64 << 20
for f in zwrFiles:
if os.stat(f).st_size > maxSize:
splitZWR(f, maxSize)
def __populatePackageFiles__(self):
from PopulatePackages import populate
curDir = os.getcwd()
os.chdir(self._packagesDir)
populate(open(self._packagesCSV, "r"))
os.chdir(curDir)
def __chmodGlobalOutput__(self):
allZWRFiles = glob.glob(os.path.join(self._globalOutputDir,
"*.zwr"))
for file in allZWRFiles:
os.chmod(file, 0o644)
def __generatePackageReadMes__(self):
# assume runs from the scripts directory
if not self._generateReadMe:
return
curDir = os.getcwd()
inputDir = os.path.normpath(os.path.join(curDir, "../"))
readMeGen = MCompReposReadMeGenerator(inputDir,
self._vistARepoDir)
readMeGen.generatePackageReadMes()
def __switchBranch__(self):
if self._gitBranch is None:
return
switchBranch(self._gitBranch, self._vistARepoDir)
def __reportGitStatus__(self):
logger.info("Reporting git status on Uncategorized dir")
output = getStatus(self._packagesDir, 'Uncategorized/')
logger.info(output)
logger.info("Reporting git status on Packages dir")
output = getStatus(self._packagesDir)
logger.info(output)
def __cleanup__(self):
pass
def main():
testClientParser = createTestClientArgParser()
parser = argparse.ArgumentParser(description='VistA M Component Extractor',
parents=[testClientParser])
parser.add_argument('-o', '--outputDir', required=True,
help='output Dir to store global/routine export files')
parser.add_argument('-r', '--vistARepo', required=True,
help='path to the top directory of VistA-M repository')
parser.add_argument('-l', '--logDir', required=True,
help='path to the top directory to store the log files')
parser.add_argument('-ro', '--routineOutDir', default=None,
                      help='path to the directory where GT.M stores routines')
parser.add_argument('-sx', '--serialize', default=False, action="store_true",
                      help='export the globals serially (needed on a single-user Cache instance)')
result = parser.parse_args();
print (result)
outputDir = result.outputDir
assert os.path.exists(outputDir)
initConsoleLogging()
""" create the VistATestClient"""
testClient = VistATestClientFactory.createVistATestClientWithArgs(result)
assert testClient
with testClient as vistAClient:
vistADataExtractor = VistADataExtractor(result.vistARepo,
outputDir,
result.logDir,
result.routineOutDir,
serialExport = result.serialize)
vistADataExtractor.extractData(testClient)
def test1():
vistADataExtractor = VistADataExtractor(".",".",".")
vistADataExtractor.unpackRoutines(sys.argv[1], sys.argv[2])
#vistADataExtractor.__chmodGlobalOutput__()
#vistADataExtractor.__removePackagesTree__()
#vistADataExtractor.__unpackRoutines__()
#vistADataExtractor.__copyAllGlobals__()
#vistADataExtractor.__splitGlobalFiles__()
#vistADataExtractor.__populatePackageFiles__()
if __name__ == '__main__':
#test1()
main()
| josephsnyder/VistA | Scripts/VistAMComponentExtractor.py | Python | apache-2.0 | 11,188 |
import os
import json
try:
import yaml
except ImportError:
yaml = None
def root():
''' Assuming that this function is in root.utils, returns the root directory
of the project. '''
path, _ = os.path.split(__file__)
return os.path.abspath(path)
def loadfile(filename, _format=None):
''' Loads a file at a particular `filename` location. '''
with open(filename) as file:
data = file.read()
if not _format:
return data
elif _format=='json':
return json.loads(data)
elif _format=='yaml':
if yaml:
return yaml.load(data)
else:
print "yaml support is not currently installed."
elif _format=='split':
return data.split()
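# Example usage (the filename here is hypothetical):
#   settings = loadfile(os.path.join(root(), "settings.json"), _format='json')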
| kreativitea/RandomData | utils.py | Python | mit | 739 |
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This program splits up a jprof profile into multiple files based on a
# list of functions in a text file. First, a complete profile is
# generated. Then, for each line in the text file, a profile is
# generated containing only stacks that go through that line, and also
# excluding all stacks in earlier lines in the text file. This means
# that the text file, from start to end, is splitting out pieces of the
# profile in their own file. Finally, a final profile containing the
# remainder is produced.
# The program takes four arguments:
# (1) The path to jprof.
# (2) The path to the text file describing the splits. The output
# will be placed in the same directory as this file.
# (3) The program that was profiled.
# (4) The jprof-log file generated by the profile, to be split up.
# (Really, all arguments from (3) and later are passed through to
# jprof, so additional arguments could be provided if you want to pass
# additional arguments to jprof.)
# In slightly more detail:
#
# This script uses jprof's includes (-i) and excludes (-e) options to
# split profiles into segments. It takes as input a single text file,
# and from that text file creates a series of jprof profiles in the
# directory the text file is in.
#
# The input file format looks like the following:
#
# poll g_main_poll
# GetRuleCascade CSSRuleProcessor::GetRuleCascade(nsPresContext *, nsIAtom *)
# RuleProcessorData RuleProcessorData::RuleProcessorData(nsPresContext *, nsIContent *, nsRuleWalker *, nsCompatibility *)
#
# From this input file, the script will construct a profile called
# jprof-0.html that contains the whole profile, a profile called
# jprof-1-poll.html that includes only stacks with g_main_poll, a
# profile called jprof-2-GetRuleCascade.html that includes only stacks
# that have GetRuleCascade and do not have g_main_poll, a profile called
# jprof-3-RuleProcessorData.html that includes only stacks that have the
# RuleProcessorData constructor and do not have GetRuleCascade or
# g_main_poll, and a profile called jprof-4.html that includes only
# stacks that do not have any of the three functions in them.
#
# This means that all of the segments of the profile, except
# jprof-0.html, are mutually exclusive. Thus clever ordering of the
# functions in the input file can lead to a logical splitting of the
# profile into segments.
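#
# Example invocation (all paths are illustrative):
#
#   python split-profile.py /usr/local/bin/jprof splits.txt /usr/bin/firefox /tmp/jprof-log-1234
#
# which writes jprof-0.html, jprof-1-<name>.html, ... into the directory
# containing splits.txt.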
import sys
import subprocess
import os.path
if len(sys.argv) < 5:
sys.stderr.write("Expected arguments: <jprof> <split-file> <program> <jprof-log>\n")
sys.exit(1)
jprof = sys.argv[1]
splitfile = sys.argv[2]
passthrough = sys.argv[3:]
for f in [jprof, splitfile]:
if not os.path.isfile(f):
sys.stderr.write("could not find file: {0}\n".format(f))
sys.exit(1)
def read_splits(splitfile):
"""
Read splitfile (each line of which contains a name, a space, and
then a function name to split on), and return a list of pairs
representing exactly that. (Note that the name cannot contain
spaces, but the function name can, and often does.)
"""
def line_to_split(line):
line = line.strip("\r\n")
idx = line.index(" ")
return (line[0:idx], line[idx+1:])
io = open(splitfile, "r")
result = [line_to_split(line) for line in io]
io.close()
return result
splits = read_splits(splitfile)
def generate_profile(options, destfile):
"""
Run jprof to generate one split of the profile.
"""
args = [jprof] + options + passthrough
print "Generating {0}".format(destfile)
destio = open(destfile, "w")
# jprof expects the "jprof-map" file to be in its current working directory
cwd = None
for option in passthrough:
        if option.find("jprof-log") != -1:
cwd = os.path.dirname(option)
if cwd is None:
raise StandardError("no jprof-log option given")
process = subprocess.Popen(args, stdout=destio, cwd=cwd)
process.wait()
destio.close()
if process.returncode != 0:
os.remove(destfile)
sys.stderr.write("Error {0} from command:\n {1}\n".format(process.returncode, " ".join(args)))
sys.exit(process.returncode)
def output_filename(number, splitname):
"""
Return the filename (absolute path) we should use to output the
profile segment with the given number and splitname. Splitname
should be None for the complete profile and the remainder.
"""
def pad_count(i):
result = str(i)
# 0-pad to the same length
result = "0" * (len(str(len(splits) + 1)) - len(result)) + result
return result
name = pad_count(number)
if splitname is not None:
name += "-" + splitname
return os.path.join(os.path.dirname(splitfile),
"jprof-{0}.html".format(name))
# generate the complete profile
generate_profile([], output_filename(0, None))
# generate the listed splits
count = 1
excludes = []
for (splitname, splitfunction) in splits:
generate_profile(excludes + ["-i" + splitfunction],
output_filename(count, splitname))
excludes += ["-e" + splitfunction]
count = count + 1
# generate the remainder after the splits
generate_profile(excludes, output_filename(count, None))
| Yukarumya/Yukarum-Redfoxes | tools/jprof/split-profile.py | Python | mpl-2.0 | 5,461 |
import os, sys, urllib, textwrap
import codecs
from twisted.python import usage
from allmydata.util.assertutil import precondition
from allmydata.util.encodingutil import unicode_to_url, quote_output, \
quote_local_unicode_path, argv_to_abspath
from allmydata.scripts.default_nodedir import _default_nodedir
def get_default_nodedir():
return _default_nodedir
def wrap_paragraphs(text, width):
# like textwrap.wrap(), but preserve paragraphs (delimited by double
# newlines) and leading whitespace, and remove internal whitespace.
text = textwrap.dedent(text)
if text.startswith("\n"):
text = text[1:]
return "\n\n".join([textwrap.fill(paragraph, width=width)
for paragraph in text.split("\n\n")])
class BaseOptions(usage.Options):
def __init__(self):
super(BaseOptions, self).__init__()
self.command_name = os.path.basename(sys.argv[0])
# Only allow "tahoe --version", not e.g. "tahoe start --version"
def opt_version(self):
raise usage.UsageError("--version not allowed on subcommands")
description = None
description_unwrapped = None
def __str__(self):
width = int(os.environ.get('COLUMNS', '80'))
s = (self.getSynopsis() + '\n' +
"(use 'tahoe --help' to view global options)\n" +
'\n' +
self.getUsage())
if self.description:
s += '\n' + wrap_paragraphs(self.description, width) + '\n'
if self.description_unwrapped:
du = textwrap.dedent(self.description_unwrapped)
if du.startswith("\n"):
du = du[1:]
s += '\n' + du + '\n'
return s
class BasedirOptions(BaseOptions):
default_nodedir = _default_nodedir
optParameters = [
["basedir", "C", None, "Specify which Tahoe base directory should be used. [default: %s]"
% quote_local_unicode_path(_default_nodedir)],
]
def parseArgs(self, basedir=None):
# This finds the node-directory option correctly even if we are in a subcommand.
root = self.parent
while root.parent is not None:
root = root.parent
if root['node-directory'] and self['basedir']:
raise usage.UsageError("The --node-directory (or -d) and --basedir (or -C) options cannot both be used.")
if root['node-directory'] and basedir:
raise usage.UsageError("The --node-directory (or -d) option and a basedir argument cannot both be used.")
if self['basedir'] and basedir:
raise usage.UsageError("The --basedir (or -C) option and a basedir argument cannot both be used.")
if basedir:
b = argv_to_abspath(basedir)
elif self['basedir']:
b = argv_to_abspath(self['basedir'])
elif root['node-directory']:
b = argv_to_abspath(root['node-directory'])
elif self.default_nodedir:
b = self.default_nodedir
else:
raise usage.UsageError("No default basedir available, you must provide one with --node-directory, --basedir, or a basedir argument")
self['basedir'] = b
self['node-directory'] = b
def postOptions(self):
if not self['basedir']:
raise usage.UsageError("A base directory for the node must be provided.")
class NoDefaultBasedirOptions(BasedirOptions):
default_nodedir = None
optParameters = [
["basedir", "C", None, "Specify which Tahoe base directory should be used."],
]
# This is overridden in order to ensure we get a "Wrong number of arguments."
# error when more than one argument is given.
def parseArgs(self, basedir=None):
BasedirOptions.parseArgs(self, basedir)
def getSynopsis(self):
return "Usage: %s [global-options] %s [options] NODEDIR" % (self.command_name, self.subcommand_name)
DEFAULT_ALIAS = u"tahoe"
def get_aliases(nodedir):
aliases = {}
aliasfile = os.path.join(nodedir, "private", "aliases")
rootfile = os.path.join(nodedir, "private", "root_dir.cap")
try:
f = open(rootfile, "r")
rootcap = f.read().strip()
if rootcap:
aliases[DEFAULT_ALIAS] = rootcap
except EnvironmentError:
pass
try:
f = codecs.open(aliasfile, "r", "utf-8")
for line in f.readlines():
line = line.strip()
if line.startswith("#") or not line:
continue
name, cap = line.split(u":", 1)
            # strip surrounding whitespace and store the cap as UTF-8 bytes
cap = cap.strip().encode('utf-8')
aliases[name] = cap
except EnvironmentError:
pass
return aliases
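# Illustrative aliases file contents (hypothetical caps, not from the original
# source) as parsed by get_aliases() above:
#
#   # lines starting with '#' and blank lines are ignored
#   work: URI:DIR2:xxxxxxxxxxxxxxxx:yyyyyyyyyyyyyyyy
#
# which would yield {u"work": "URI:DIR2:xxxxxxxxxxxxxxxx:yyyyyyyyyyyyyyyy"},
# plus a u"tahoe" entry if private/root_dir.cap exists.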
class DefaultAliasMarker:
pass
pretend_platform_uses_lettercolon = False # for tests
def platform_uses_lettercolon_drivename():
if ("win32" in sys.platform.lower()
or "cygwin" in sys.platform.lower()
or pretend_platform_uses_lettercolon):
return True
return False
class TahoeError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
self.msg = msg
def display(self, err):
print >>err, self.msg
class UnknownAliasError(TahoeError):
def __init__(self, msg):
TahoeError.__init__(self, "error: " + msg)
def get_alias(aliases, path_unicode, default):
"""
Transform u"work:path/filename" into (aliases[u"work"], u"path/filename".encode('utf-8')).
If default=None, then an empty alias is indicated by returning
DefaultAliasMarker. We special-case strings with a recognized cap URI
prefix, to make it easy to access specific files/directories by their
caps.
If the transformed alias is either not found in aliases, or is blank
and default is not found in aliases, an UnknownAliasError is
raised.
"""
precondition(isinstance(path_unicode, unicode), path_unicode)
from allmydata import uri
path = path_unicode.encode('utf-8').strip(" ")
if uri.has_uri_prefix(path):
# We used to require "URI:blah:./foo" in order to get a subpath,
# stripping out the ":./" sequence. We still allow that for compatibility,
# but now also allow just "URI:blah/foo".
sep = path.find(":./")
if sep != -1:
return path[:sep], path[sep+3:]
sep = path.find("/")
if sep != -1:
return path[:sep], path[sep+1:]
return path, ""
colon = path.find(":")
if colon == -1:
# no alias
if default == None:
return DefaultAliasMarker, path
if default not in aliases:
raise UnknownAliasError("No alias specified, and the default %s alias doesn't exist. "
"To create it, use 'tahoe create-alias %s'."
% (quote_output(default), quote_output(default, quotemarks=False)))
return uri.from_string_dirnode(aliases[default]).to_string(), path
if colon == 1 and default is None and platform_uses_lettercolon_drivename():
# treat C:\why\must\windows\be\so\weird as a local path, not a tahoe
# file in the "C:" alias
return DefaultAliasMarker, path
# decoding must succeed because path is valid UTF-8 and colon & space are ASCII
alias = path[:colon].decode('utf-8')
if u"/" in alias:
# no alias, but there's a colon in a dirname/filename, like
# "foo/bar:7"
if default == None:
return DefaultAliasMarker, path
if default not in aliases:
raise UnknownAliasError("No alias specified, and the default %s alias doesn't exist. "
"To create it, use 'tahoe create-alias %s'."
% (quote_output(default), quote_output(default, quotemarks=False)))
return uri.from_string_dirnode(aliases[default]).to_string(), path
if alias not in aliases:
raise UnknownAliasError("Unknown alias %s, please create it with 'tahoe add-alias' or 'tahoe create-alias'." %
quote_output(alias))
return uri.from_string_dirnode(aliases[alias]).to_string(), path[colon+1:]
def escape_path(path):
# this always returns bytes, specifically US-ASCII, valid URL characters
segments = path.split("/")
return "/".join([urllib.quote(unicode_to_url(s)) for s in segments])
| david415/tahoe-lafs | src/allmydata/scripts/common.py | Python | gpl-2.0 | 8,400 |
def readDensityFile(infile):
infile.next()
infile.next()
density = []
for line in infile:
tmp = line.split(",")
tmp2 = tmp[0].split("(")
d = float(tmp2[1])
density.append(d)
infile.close()
return density
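# Assumption (not stated in the original script): each data line looks roughly
# like "(0.123,0.456)", so splitting on "," and then the first field on "("
# recovers the density value as the number right after the opening parenthesis.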
infile = open("DensityFCI_w=075_N=4_L=10.txt",'r')
densityFCI = readDensityFile(infile)
infile = open("DensityCCSD_w=075_N=4_L=10.txt")
densityCCSD = readDensityFile(infile)
infile = open("DensityCISD_w=075_N=4_L=10.txt")
densityCISD = readDensityFile(infile)
from numpy import *
import sys
densityFCI = array(densityFCI)
densityCCSD = array(densityCCSD)
densityCISD = array(densityCISD)
diff_FCI_CCSD = abs(densityFCI-densityCCSD)
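# Note: diff_FCI_CCSD (the absolute FCI-CCSD difference) is computed here but
# not plotted below; only the three densities themselves are shown.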
Lx = 10
x = linspace(-Lx,Lx,len(densityFCI))
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(x, densityFCI,'-b')
plt.plot(x,densityCCSD,'-r')
plt.plot(x,densityCISD,'-g')
plt.xlabel("x",fontsize=16)
plt.ylabel(r'$\rho(x,x)$',fontsize=16)
plt.legend(["FCI","CCSD","CISD"])
plt.show()
| hakii27/PythonVersionMaster | Results/OneDimDot/FourElectrons/DensityPlot.py | Python | lgpl-3.0 | 956 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate java source files from protobuf files.
Usage:
protoc_java.py {protoc} {proto_path} {java_out} {stamp_file} {proto_files}
This is a helper file for the genproto_java action in protoc_java.gypi.
It performs the following steps:
1. Deletes all old sources (ensures deleted classes are not part of new jars).
2. Creates source directory.
3. Generates Java files using protoc.
4. Creates a new stamp file.
"""
import os
import shutil
import subprocess
import sys
def main(argv):
if len(argv) < 5:
usage()
return 1
protoc_path, proto_path, java_out, stamp_file = argv[1:5]
proto_files = argv[5:]
# Delete all old sources.
if os.path.exists(java_out):
shutil.rmtree(java_out)
# Create source directory.
os.makedirs(java_out)
# Specify arguments to the generator.
generator_args = ['optional_field_style=reftypes',
'store_unknown_fields=true']
out_arg = '--javanano_out=' + ','.join(generator_args) + ':' + java_out
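  # For illustration (assuming java_out='gen/java'), out_arg ends up as:
  #   --javanano_out=optional_field_style=reftypes,store_unknown_fields=true:gen/java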
# Generate Java files using protoc.
ret = subprocess.call(
[protoc_path, '--proto_path', proto_path, out_arg] + proto_files)
if ret == 0:
# Create a new stamp file.
with file(stamp_file, 'a'):
os.utime(stamp_file, None)
return ret
def usage():
  print(__doc__)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| s20121035/rk3288_android5.1_repo | external/chromium_org/build/protoc_java.py | Python | gpl-3.0 | 1,523 |
import importlib
import inspect
import os
import re
import sys
import tempfile
import threading
from io import StringIO
from pathlib import Path
from unittest import mock
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.http import Http404
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.urls import path, reverse
from django.urls.converters import IntConverter
from django.utils.functional import SimpleLazyObject
from django.utils.safestring import mark_safe
from django.views.debug import (
CLEANSED_SUBSTITUTE, CallableSettingWrapper, ExceptionReporter,
Path as DebugPath, cleanse_setting, default_urlconf,
technical_404_response, technical_500_response,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
class User:
def __str__(self):
return 'jacob'
class WithoutEmptyPathUrls:
urlpatterns = [path('url/', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(SimpleTestCase):
def test_files(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
with self.assertLogs('django.request', 'ERROR'):
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error ({{ exception }}).',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
self.assertContains(response, '(Insufficient Permissions).', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "Django tried these URL patterns", status_code=404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
# Pattern and view name of a RegexURLPattern appear.
self.assertContains(response, r"^regex-post/(?P<pk>[0-9]+)/$", status_code=404)
self.assertContains(response, "[name='regex-post']", status_code=404)
# Pattern and view name of a RoutePattern appear.
self.assertContains(response, r"path-post/<int:pk>/", status_code=404)
self.assertContains(response, "[name='path-post']", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get('/')
self.assertContains(response, "The empty path didn't match any of these.", status_code=404)
def test_technical_404(self):
response = self.client.get('/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
def test_classbased_technical_404(self):
response = self.client.get('/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_non_l10ned_numeric_ids(self):
"""
        Numeric IDs and the line numbers in fancy traceback context blocks shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises500/')
            # We look for an HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match.group('id')
self.assertFalse(
re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
)
def test_template_exceptions(self):
with self.assertLogs('django.request', 'ERROR'):
try:
self.client.get(reverse('template_exception'))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find("raise Exception('boom')"), -1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]), self.assertLogs('django.request', 'ERROR'):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
# Assert as HTML.
self.assertContains(
response,
'<li><code>django.template.loaders.filesystem.Loader</code>: '
'%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
with self.assertLogs('django.request', 'ERROR'):
with self.assertRaises(TemplateDoesNotExist):
self.client.get('/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
        Make sure that the default URLconf template is shown instead
of the technical 404 page, if the user has not altered their
URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>The install worked successfully! Congratulations!</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
        should be opened with the utf-8 charset, as is the default specified
        for template engines.
"""
with mock.patch.object(DebugPath, 'open') as m:
default_urlconf(None)
m.assert_called_once_with(encoding='utf-8')
m.reset_mock()
technical_404_response(mock.MagicMock(), mock.Mock())
m.assert_called_once_with(encoding='utf-8')
def test_technical_404_converter_raise_404(self):
with mock.patch.object(IntConverter, 'to_python', side_effect=Http404):
response = self.client.get('/path-post/1/')
self.assertContains(response, 'Page not found', status_code=404)
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
databases = {'default'}
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute('INVALID SQL')
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get('/'), *exc_info)
self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF='view_tests.urls',
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>jacob</p>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
self.assertIn('<p>No POST data</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_eol_support(self):
"""The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = ['print %d' % i for i in range(1, 6)]
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, (newline.join(LINES) + newline).encode())
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get('/test_view/')
try:
try:
raise AttributeError(mark_safe('<p>Top level</p>'))
except AttributeError as explicit:
try:
raise ValueError(mark_safe('<p>Second exception</p>')) from explicit
except ValueError:
raise IndexError(mark_safe('<p>Final exception</p>'))
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
# Both messages are twice on page -- one rendered as html,
# one as plain text (for pastebin)
self.assertEqual(2, html.count(explicit_exc.format('<p>Top level</p>')))
self.assertEqual(2, html.count(implicit_exc.format('<p>Second exception</p>')))
self.assertEqual(10, html.count('<p>Final exception</p>'))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format('<p>Top level</p>'), text)
self.assertIn(implicit_exc.format('<p>Second exception</p>'), text)
self.assertEqual(3, text.count('<p>Final exception</p>'))
def test_reporting_frames_without_source(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn('generated in funcName, line 2', html)
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
html,
)
text = reporter.get_traceback_text()
self.assertIn(
'"generated", line 2, in funcName\n'
' <source code not available>',
text,
)
def test_reporting_frames_for_cyclic_reference(self):
try:
def test_func():
try:
raise RuntimeError('outer') from RuntimeError('inner')
except RuntimeError as exc:
raise exc.__cause__
test_func()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
def generate_traceback_frames(*args, **kwargs):
nonlocal tb_frames
tb_frames = reporter.get_traceback_frames()
tb_frames = None
tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True)
tb_generator.start()
tb_generator.join(timeout=5)
if tb_generator.is_alive():
# tb_generator is a daemon that runs until the main thread/process
# exits. This is resource heavy when running the full test suite.
# Setting the following values to None makes
# reporter.get_traceback_frames() exit early.
exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None
tb_generator.join()
self.fail('Cyclic reference in Exception Reporter.get_traceback_frames()')
if tb_frames is None:
# can happen if the thread generating traceback got killed
# or exception while generating the traceback
self.fail('Traceback generation failed')
last_frame = tb_frames[-1]
self.assertIn('raise exc.__cause__', last_frame['context_line'])
self.assertEqual(last_frame['filename'], __file__)
self.assertEqual(last_frame['function'], 'test_func')
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_local_variable_escaping(self):
"""Safe strings in local variables are escaped."""
try:
local = mark_safe('<p>Local variable</p>')
raise ValueError(local)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()
self.assertIn('<td class="code"><pre>'<p>Local variable</p>'</pre></td>', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
try:
class LargeOutput:
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
def test_encoding_error(self):
"""
A UnicodeError displays a portion of the problematic string. HTML in
safe strings is escaped.
"""
try:
mark_safe('abcdefghijkl<p>mnὀp</p>qrstuwxyz').encode('ascii')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h2>Unicode error hint</h2>', html)
self.assertIn('The string that could not be encoded/decoded was: ', html)
self.assertIn('<strong><p>mnὀp</p></strong>', html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ModuleNotFoundError at /test_view/</h1>', html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get('/test_view/')
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback"
)
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
value = '<td>items</td><td class="code"><pre>'Oops'</pre></td>'
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre><InMemoryUploadedFile: '
'items (application/octet-stream)></pre></td>',
html
)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<td>items</td><td class="code"><pre>'Oops'</pre></td>', html)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get('/test_view/')
request.user = ExceptionUser()
try:
raise ValueError('Oops')
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>[unable to retrieve the current user]</p>', html)
text = reporter.get_traceback_text()
self.assertIn('USER: [unable to retrieve the current user]', text)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
        should be opened with the utf-8 charset, as is the default specified
        for template engines.
"""
reporter = ExceptionReporter(None, None, None, None)
with mock.patch.object(DebugPath, 'open') as m:
reporter.get_traceback_html()
m.assert_called_once_with(encoding='utf-8')
m.reset_mock()
reporter.get_traceback_text()
m.assert_called_once_with(encoding='utf-8')
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('USER: jacob', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback (most recent call last):', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertNotIn('USER:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback (most recent call last):', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(DEBUG=True)
def test_template_exception(self):
request = self.rf.get('/test_view/')
try:
render(request, 'debug/template_error.html')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
templ_path = Path(Path(__file__).parents[1], 'templates', 'debug', 'template_error.html')
self.assertIn(
'Template error:\n'
'In template %(path)s, error at line 2\n'
' \'cycle\' tag requires at least two arguments\n'
' 1 : Template with error:\n'
' 2 : {%% cycle %%} \n'
' 3 : ' % {'path': templ_path},
text
)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn('items = <InMemoryUploadedFile:', text)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {
'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value',
}
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = str(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports (#14614).
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
        Everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
        No POST parameters or frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
The sensitive_variables decorator works with object methods.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as arguments to the decorated
function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as keyword arguments to the
decorated function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
        Callable settings which forbid setting attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots:
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports.
Here we specifically test the plain text 500 debug-only error page served
    when the request is detected as having been sent by JS code. We don't check
for (non)existence of frames vars in the traceback information section of
the response content because we don't include them in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
def test_non_sensitive_request(self):
"""
        Request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
No POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
def test_ajax_response_encoding(self):
response = self.client.get('/raises500/', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8')
class HelperFunctionTests(SimpleTestCase):
def test_cleanse_setting_basic(self):
self.assertEqual(cleanse_setting('TEST', 'TEST'), 'TEST')
self.assertEqual(cleanse_setting('PASSWORD', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_ignore_case(self):
self.assertEqual(cleanse_setting('password', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_recurses_in_dictionary(self):
initial = {'login': 'cooper', 'password': 'secret'}
expected = {'login': 'cooper', 'password': CLEANSED_SUBSTITUTE}
self.assertEqual(cleanse_setting('SETTING_NAME', initial), expected)
| georgemarshall/django | tests/view_tests/tests/test_debug.py | Python | bsd-3-clause | 53,853 |
import math
ceil = lambda f: int(math.ceil(f))
from gobject import *
import gtk
import cairo
import pango
line = 10
curve = 60
dotSmall = 14
dotLarge = 24
lineprc = 1/7.
hpadding = 5
vpadding = 3
class SpotGraph (gtk.EventBox):
__gsignals__ = {
'spotClicked' : (SIGNAL_RUN_FIRST, TYPE_NONE, (str,))
}
def __init__ (self):
gtk.EventBox.__init__(self)
self.connect("expose_event", self.expose)
self.typeColors = [[[85, 152, 215], [59, 106, 151]],
[[115, 210, 22], [78, 154, 6]]]
for type in self.typeColors:
for color in type:
color[0] = color[0]/255.
color[1] = color[1]/255.
color[2] = color[2]/255.
self.add_events( gtk.gdk.LEAVE_NOTIFY_MASK |
gtk.gdk.POINTER_MOTION_MASK |
gtk.gdk.BUTTON_PRESS_MASK |
gtk.gdk.BUTTON_RELEASE_MASK )
self.connect("button_press_event", self.button_press)
self.connect("button_release_event", self.button_release)
self.connect("motion_notify_event", self.motion_notify)
self.connect("leave_notify_event", self.motion_notify)
self.connect("size-allocate", self.size_allocate)
self.cords = []
self.hovered = None
self.pressed = False
self.spots = {}
self.spotQueue = [] # For spots added prior to widget allocation
self.xmarks = []
self.ymarks = []
self.set_visible_window(False)
############################################################################
# Drawing #
############################################################################
def redraw_canvas(self, rect=None):
if self.window:
if not rect:
alloc = self.get_allocation()
rect = (0, 0, alloc.width, alloc.height)
rect = gtk.gdk.Rectangle(*map(int,rect))
self.window.invalidate_rect(rect, True)
self.window.process_updates(True)
def expose(self, widget, event):
context = widget.window.cairo_create()
self.draw(context, event.area)
return False
def draw (self, context, r):
alloc = self.get_allocation()
width = alloc.width
height = alloc.height
#------------------------------------------------------ Paint side ruler
context.move_to(alloc.x+line, alloc.y+line)
context.rel_line_to(0, height-line*2-curve)
context.rel_curve_to(0, curve, 0, curve, curve, curve)
context.rel_line_to(width-line*2-curve, 0)
context.set_line_width(line)
context.set_line_cap(cairo.LINE_CAP_ROUND)
state = self.state == gtk.STATE_NORMAL and gtk.STATE_PRELIGHT or self.state
context.set_source_color(self.get_style().dark[state])
context.stroke()
#------------------------------------------------ Paint horizontal marks
for x, title in self.xmarks:
context.set_source_color(self.get_style().fg[self.state])
context.set_font_size(12)
x, y = self.prcToPix (x, 1)
context.move_to (x+line/2., y-line/2.)
context.rotate(-math.pi/2)
context.show_text(title)
context.rotate(math.pi/2)
context.set_source_color(self.get_style().bg[self.state])
context.move_to (x-line/2., y)
context.rel_curve_to (6, 0, 6, line, 6, line)
context.rel_curve_to (0, -line, 6, -line, 6, -line)
context.close_path()
context.fill()
#-------------------------------------------------- Paint vertical marks
for y, title in self.ymarks:
context.set_source_color(self.get_style().fg[self.state])
context.set_font_size(12)
x, y = self.prcToPix (0, y)
context.move_to (x+line/2., y+line/2.)
context.show_text(title)
context.set_source_color(self.get_style().bg[self.state])
context.move_to (x, y-line/2.)
context.rel_curve_to (0, 6, -line, 6, -line, 6)
context.rel_curve_to (line, 0, line, 6, line, 6)
context.close_path()
context.fill()
#----------------------------------------------------------- Paint spots
context.set_line_width(dotSmall*lineprc)
for x, y, type, name, text in self.spots.values():
context.set_source_rgb(*self.typeColors[type][0])
if self.hovered and name == self.hovered[3]:
continue
x, y = self.prcToPix (x, y)
context.arc(x, y, dotSmall/(1+lineprc)/2., 0, 2 * math.pi)
context.fill_preserve()
context.set_source_rgb(*self.typeColors[type][1])
context.stroke()
#--------------------------------------------------- Paint hovered spots
context.set_line_width(dotLarge*lineprc)
if self.hovered:
x, y, type, name, text = self.hovered
x, y = self.prcToPix (x, y)
if not self.pressed:
context.set_source_rgb(*self.typeColors[type][0])
else:
context.set_source_rgb(*self.typeColors[type][1])
context.arc(x, y, dotLarge/(1+lineprc)/2., 0, 2 * math.pi)
context.fill_preserve()
context.set_source_rgb(*self.typeColors[type][1])
context.stroke()
x, y, width, height = self.getTextBounds(self.hovered)
self.get_style().paint_flat_box (self.window,
gtk.STATE_NORMAL, gtk.SHADOW_NONE, r, self, "tooltip",
int(x-hpadding), int(y-vpadding),
ceil(width+hpadding*2), ceil(height+vpadding*2))
context.move_to(x, y)
context.set_source_color(self.get_style().fg[self.state])
context.show_layout(self.create_pango_layout(text))
############################################################################
# Events #
############################################################################
def button_press (self, widget, event):
alloc = self.get_allocation()
self.cords = [event.x+alloc.x, event.y+alloc.y]
self.pressed = True
if self.hovered:
self.redraw_canvas(self.getBounds(self.hovered))
def button_release (self, widget, event):
alloc = self.get_allocation()
self.cords = [event.x+alloc.x, event.y+alloc.y]
self.pressed = False
if self.hovered:
self.redraw_canvas(self.getBounds(self.hovered))
if self.pointIsOnSpot (event.x+alloc.x, event.y+alloc.y, self.hovered):
self.emit("spotClicked", self.hovered[3])
def motion_notify (self, widget, event):
alloc = self.get_allocation()
self.cords = [event.x+alloc.x, event.y+alloc.y]
spot = self.getSpotAtPoint (*self.cords)
if self.hovered and spot == self.hovered:
return
if self.hovered:
bounds = self.getBounds(self.hovered)
self.hovered = None
self.redraw_canvas(bounds)
if spot:
self.hovered = spot
self.redraw_canvas(self.getBounds(self.hovered))
def size_allocate (self, widget, allocation):
assert self.get_allocation().width > 1
for spot in self.spotQueue:
self.addSpot(*spot)
del self.spotQueue[:]
############################################################################
# Interaction #
############################################################################
def addSpot (self, name, text, x0, y0, type=0):
""" x and y are in % from 0 to 1 """
assert type in range(len(self.typeColors))
if self.get_allocation().width <= 1:
self.spotQueue.append((name, text, x0, y0, type))
return
x1, y1 = self.getNearestFreeNeighbourHexigon(x0, 1-y0)
spot = (x1, y1, type, name, text)
self.spots[name] = spot
if not self.hovered and self.cords and \
self.pointIsOnSpot (self.cords[0], self.cords[1], spot):
self.hovered = spot
self.redraw_canvas(self.getBounds(spot))
def removeSpot (self, name):
if not name in self.spots:
return
spot = self.spots.pop(name)
bounds = self.getBounds(spot)
if spot == self.hovered:
self.hovered = None
self.redraw_canvas(bounds)
def clearSpots (self):
self.hovered = None
self.spots.clear()
        self.redraw_canvas()
def addXMark (self, x, title):
self.xmarks.append( (x, title) )
def addYMark (self, y, title):
self.ymarks.append( (1-y, title) )
############################################################################
# Internal stuff #
############################################################################
def getTextBounds (self, spot):
x, y, type, name, text = spot
x, y = self.prcToPix (x, y)
alloc = self.get_allocation()
width = alloc.width
height = alloc.height
extends = self.create_pango_layout(text).get_extents()
scale = float(pango.SCALE)
x_bearing, y_bearing, twidth, theight = [e/scale for e in extends[1]]
tx = x - x_bearing + dotLarge/2.
ty = y - y_bearing - theight - dotLarge/2.
if tx + twidth > width and x - x_bearing - twidth - dotLarge/2. > alloc.x:
tx = x - x_bearing - twidth - dotLarge/2.
if ty < alloc.y:
ty = y - y_bearing + dotLarge/2.
return (tx, ty, twidth, theight)
def join (self, r0, r1):
x1 = min(r0[0], r1[0])
x2 = max(r0[0]+r0[2], r1[0]+r1[2])
y1 = min(r0[1], r1[1])
y2 = max(r0[1]+r0[3], r1[1]+r1[3])
return (x1, y1, x2 - x1, y2 - y1)
def getBounds (self, spot):
x, y, type, name, text = spot
x, y = self.prcToPix (x, y)
if spot == self.hovered:
size = dotLarge
else: size = dotSmall
bounds = (x-size/2.-1, y-size/2.-1, size+2, size+2)
if spot == self.hovered:
x, y, w, h = self.getTextBounds(spot)
tbounds = (x-hpadding, y-vpadding, w+hpadding*2+1, h+vpadding*2+1)
return self.join(bounds, tbounds)
return bounds
def getNearestFreeNeighbourHexigon (self, xorg, yorg):
""" This method performs an hexigon search for an empty place to put a
new dot. """
x, y = self.prcToPix (xorg, yorg)
# Start by testing current spot
if self.isEmpty (x, y):
return xorg, yorg
directions = [(math.cos((i+2)*math.pi/3),
math.sin((i+2)*math.pi/3)) for i in xrange(6)]
level = 1
while True:
x += dotSmall
for dx, dy in directions:
for i in xrange(level):
if self.isEmpty (x, y):
return self.pixToPrc (x, y)
x += dx*dotSmall
y += dy*dotSmall
level += 1
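    # Rough shape of the search above (illustrative): ring 1 tests the six
    # positions one dotSmall step away, ring 2 the twelve positions two steps
    # away, and so on - ring N contributes 6*N candidate centres.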
def getNearestFreeNeighbourArchi (self, xorg, yorg):
""" This method performs an archimedes-spircal search for an empty
place to put a new dot.
http://en.wikipedia.org/wiki/Archimedean_spiral """
xorg, yorg = self.prcToPix (xorg, yorg)
# Start by testing current spot
if self.isEmpty (xorg, yorg):
return self.pixToPrc (xorg, yorg)
r = 0
while True:
# This is an approx to the equation
# cos((r-s)/(2pi)) = (r^2+s^2-1)/(2*r*s)
# which gives the next point on the spiral 1 away.
r = (4*math.pi**3*r + r**2 + math.sqrt(16*math.pi**6 +
8*math.pi**3*r + r**4)) / (4*math.pi**3 + 2*r)
x = r*math.cos(r)/(4*math.pi)*dotSmall + xorg
y = r*math.sin(r)/(4*math.pi)*dotSmall + yorg
if self.isEmpty (x, y):
return self.pixToPrc (x, y)
def getNearestFreeNeighbourSquare (self, xorg, yorg):
""" This method performs a spircal search for an empty square to put a
new dot. """
up = 2
right = 1
down = 1
left = 2
x, y = self.prcToPix (xorg, yorg)
# Start by testing current spot
if self.isEmpty (x, y):
return self.pixToPrc (x, y)
while True:
for i in range(right):
x += dotSmall
if self.isEmpty (x, y):
return self.pixToPrc (x, y)
for i in range(down):
y += dotSmall
if self.isEmpty (x, y):
return self.pixToPrc (x, y)
for i in range(left):
x -= dotSmall
if self.isEmpty (x, y):
return self.pixToPrc (x, y)
for i in range(up):
y -= dotSmall
if self.isEmpty (x, y):
return self.pixToPrc (x, y)
# Grow spiral bounds
right += 2
down += 2
left += 2
up += 2
def isEmpty (self, x0, y0):
""" Returns true if a spot placed on (x, y) is inside the graph and not
intersecting with other spots.
x and y should be in pixels, not percent """
        # Make sure the spiral search doesn't put dots outside the graph
x, y = self.prcToPix(0,0)
w, h = self.prcToPix(1,1)
if not x <= x0 <= w or not y <= y0 <= h:
return False
# Tests if the spot intersects any other spots
for x1, y1, type, name, text in self.spots.values():
x1, y1 = self.prcToPix(x1, y1)
if (x1-x0)**2 + (y1-y0)**2 < dotSmall**2 - 0.1:
return False
return True
def pointIsOnSpot (self, x0, y0, spot):
""" Returns true if (x, y) is inside the spot 'spot'. The size of the
spot is determined based on its hoverness.
x and y should be in pixels, not percent """
if spot == self.hovered:
size = dotLarge
else: size = dotSmall
x1, y1, type, name, text = spot
x1, y1 = self.prcToPix(x1, y1)
if (x1-x0)**2 + (y1-y0)**2 <= (size/2.)**2:
return True
return False
def getSpotAtPoint (self, x, y):
""" Returns the spot embrace (x, y) if any. Otherwise it returns None.
x and y should be in pixels, not percent """
if self.hovered and self.pointIsOnSpot(x, y, self.hovered):
return self.hovered
for spot in self.spots.values():
if spot == self.hovered:
continue
if self.pointIsOnSpot(x, y, spot):
return spot
return None
def prcToPix (self, x, y):
""" Translates from 0-1 cords to real world cords """
alloc = self.get_allocation()
return x*(alloc.width - line*1.5-dotLarge*0.5) + line*1.5 + alloc.x, \
y*(alloc.height - line*1.5-dotLarge*0.5) + dotLarge*0.5 + alloc.y
def pixToPrc (self, x, y):
""" Translates from real world cords to 0-1 cords """
alloc = self.get_allocation()
return (x - line*1.5 - alloc.x)/(alloc.width - line*1.5-dotLarge*0.5), \
(y - dotLarge*0.5 - alloc.y)/(alloc.height - line*1.5-dotLarge*0.5)
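    # prcToPix and pixToPrc are exact inverses between the 0-1 logical space
    # and widget pixels, e.g. pixToPrc(*prcToPix(0.5, 0.5)) gives back
    # (0.5, 0.5) for any allocation larger than the margins.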
if __name__ == "__main__":
w = gtk.Window()
nb = gtk.Notebook()
w.add(nb)
vb = gtk.VBox()
nb.append_page(vb)
sg = SpotGraph()
sg.addXMark(.5, "Center")
sg.addYMark(.5, "Center")
vb.pack_start(sg)
button = gtk.Button("New Spot")
def callback (button):
if not hasattr(button, "nextnum"):
button.nextnum = 0
else: button.nextnum += 1
sg.addSpot(str(button.nextnum), "Blablabla", 1, 1, 0)
button.connect("clicked", callback)
vb.pack_start(button, expand=False)
w.connect("delete-event", gtk.main_quit)
w.show_all()
w.resize(400,400)
gtk.main()
| jskurka/PyChess-Learning-Module | lib/pychess/widgets/SpotGraph.py | Python | gpl-3.0 | 17,065 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud dns record-sets export command."""
from googlecloudsdk.api_lib.dns import export_util
from googlecloudsdk.api_lib.dns import util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import files
from googlecloudsdk.third_party.apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.third_party.apitools.base.py import list_pager
class Export(base.Command):
"""Export your record-sets into a file.
This command exports the record-sets contained within the specified
managed-zone into a file.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To export record-sets into a yaml file, run:
$ {command} YAML_RECORDS_FILE -z MANAGED_ZONE
          To export record-sets into a zone file, run:
$ {command} ZONE_FILE --zone-file-format -z MANAGED_ZONE
""",
}
@staticmethod
def Args(parser):
util.ZONE_FLAG.AddToParser(parser)
parser.add_argument('records_file',
help='File to which record-sets should be exported.')
parser.add_argument(
'--zone-file-format',
required=False,
action='store_true',
help='Indicates that records-file should be in the zone file format.')
@util.HandleHttpError
def Run(self, args):
dns = self.context['dns_client']
messages = self.context['dns_messages']
resources = self.context['dns_resources']
project_id = properties.VALUES.core.project.Get(required=True)
# Get the managed-zone.
zone_ref = resources.Parse(args.zone, collection='dns.managedZones')
try:
zone = dns.managedZones.Get(zone_ref.Request())
except apitools_exceptions.HttpError as error:
raise exceptions.HttpException(util.GetErrorMessage(error))
# Get all the record-sets.
record_sets = []
for record_set in list_pager.YieldFromList(
dns.resourceRecordSets,
messages.DnsResourceRecordSetsListRequest(project=project_id,
managedZone=zone_ref.Name()),
field='rrsets'):
record_sets.append(record_set)
# Export the record-sets.
try:
with files.Context(open(args.records_file, 'w')) as export_file:
if args.zone_file_format:
export_util.WriteToZoneFile(export_file, record_sets, zone.dnsName)
else:
export_util.WriteToYamlFile(export_file, record_sets)
except Exception as exp:
msg = 'unable to export record-sets to file [{0}]: {1}'.format(
args.records_file, exp)
raise exceptions.ToolException(msg)
log.status.Print('Exported record-sets to [{0}].'.format(args.records_file))
| flgiordano/netcash | +/google-cloud-sdk/lib/surface/dns/record_sets/export.py | Python | bsd-3-clause | 3,429 |
#!/usr/bin/env python
#
# Copyright (c) 2001-2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
Test the base_dir argument for the HTMLHELP builder while using
the xsltproc executable, if it exists.
"""
import os
import sys
import TestSCons
test = TestSCons.TestSCons()
xsltproc = test.where_is('xsltproc')
if not (xsltproc and
os.path.isdir('/usr/share/xml/docbook/stylesheet/docbook-xsl')):
test.skip_test('No xsltproc or no stylesheets installed, skipping test.\n')
test.dir_fixture('image')
# Normal invocation
test.run(arguments=['-f','SConstruct.cmd'], stderr=None)
test.must_exist(test.workpath('output/index.html'))
test.must_exist(test.workpath('htmlhelp.hhp'))
test.must_exist(test.workpath('toc.hhc'))
# Cleanup
test.run(arguments=['-f','SConstruct.cmd','-c'])
test.must_not_exist(test.workpath('output/index.html'))
test.must_not_exist(test.workpath('htmlhelp.hhp'))
test.must_not_exist(test.workpath('toc.hhc'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| andrewyoung1991/scons | test/Docbook/basedir/htmlhelp/htmlhelp_cmd.py | Python | mit | 2,111 |
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# inventory cache
DOCUMENTATION = r'''
options:
aws_profile:
description: The AWS profile
type: str
aliases: [ boto_profile ]
env:
- name: AWS_PROFILE
- name: AWS_DEFAULT_PROFILE
aws_access_key:
description: The AWS access key to use.
type: str
env:
- name: AWS_ACCESS_KEY_ID
- name: AWS_ACCESS_KEY
- name: EC2_ACCESS_KEY
aws_secret_key:
description: The AWS secret key that corresponds to the access key.
type: str
env:
- name: AWS_SECRET_ACCESS_KEY
- name: AWS_SECRET_KEY
- name: EC2_SECRET_KEY
aws_security_token:
description: The AWS security token if using temporary access and secret keys.
type: str
env:
- name: AWS_SECURITY_TOKEN
- name: AWS_SESSION_TOKEN
- name: EC2_SECURITY_TOKEN
region:
description: The region for which to create the connection.
type: str
env:
- name: AWS_REGION
- name: EC2_REGION
'''
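# Illustrative consumer of this fragment (assumed module, not part of this
# file): a module or plugin DOCUMENTATION block would pull these options in with
#
#   extends_documentation_fragment:
#     - aws_credentials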
| ujenmr/ansible | lib/ansible/plugins/doc_fragments/aws_credentials.py | Python | gpl-3.0 | 1,170 |
#
# Copyright © 2012–2022 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.db import models
from weblate.trans.fields import RegexField
class Variant(models.Model):
component = models.ForeignKey("Component", on_delete=models.deletion.CASCADE)
variant_regex = RegexField(max_length=190, blank=True)
# This really should be a TextField, but it does not work with unique
# index and MySQL
key = models.CharField(max_length=576)
defining_units = models.ManyToManyField("Unit", related_name="defined_variants")
class Meta:
unique_together = (("key", "component", "variant_regex"),)
verbose_name = "variant definition"
verbose_name_plural = "variant definitions"
def __str__(self):
return f"{self.component}: {self.key}"
| nijel/weblate | weblate/trans/models/variant.py | Python | gpl-3.0 | 1,480 |
from decimal import ROUND_HALF_EVEN
import moneyed
from moneyed.localization import _sign, _format
_sign("en_GB", moneyed.GBP, prefix="£")
_format(
"en_GB",
group_size=3,
group_separator=",",
decimal_point=".",
positive_sign="",
trailing_positive_sign="",
negative_sign="-",
trailing_negative_sign="",
rounding_method=ROUND_HALF_EVEN,
)
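# Illustrative effect of the registration above (assumes py-moneyed's
# format_money helper is available; values are hypothetical):
#   >>> from moneyed import Money
#   >>> from moneyed.localization import format_money
#   >>> format_money(Money('1234.50', 'GBP'), locale='en_GB')
#   '£1,234.50'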
| waldocollective/django-hordak | hordak/__init__.py | Python | mit | 377 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: junos_command
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Execute arbitrary commands on a remote device running Junos
description:
- Network devices running the Junos operating system provide a command
driven interface both over CLI and RPC. This module provides an
interface to execute commands using these functions and return the
results to the Ansible playbook. In addition, this
module can specify a set of conditionals to be evaluated against the
returned output, only returning control to the playbook once the
entire set of conditionals has been met.
extends_documentation_fragment: junos
options:
commands:
description:
- An ordered set of CLI commands to be executed on the remote
device. The output from the commands is then returned to
the playbook in the task results.
required: false
default: null
rpcs:
description:
- The C(rpcs) argument accepts a list of RPCs to be executed
over a netconf session and the results from the RPC execution
is return to the playbook via the modules results dictionary.
required: false
default: null
waitfor:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional or set of
conditionals to be true before moving forward. If the
conditional is not true by the configured retries, the
task fails. See examples.
required: false
default: null
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(waitfor)
conditionals.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
        conditional, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
format:
description:
- Configures the encoding scheme to use when serializing output
from the device. This handles how to properly understand the
output and apply the conditionals path to the result set.
required: false
default: 'xml'
choices: ['xml', 'text']
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
the remote device being managed
"""
EXAMPLES = """
# the required set of connection arguments have been purposely left off
# the examples for brevity
- name: run a set of commands
junos_command:
commands: ['show version', 'show ip route']
- name: run a command with a conditional applied to the second command
junos_command:
commands:
- show version
- show interfaces fxp0
waitfor:
- "result[1].interface-information.physical-interface.name eq fxp0"
- name: collect interface information using rpc
junos_command:
rpcs:
- "get_interface_information interface=em0 media=True"
- "get_interface_information interface=fxp0 media=True"
"""
RETURN = """
stdout:
description: The output from the commands read from the device
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The output read from the device split into lines
returned: always
type: list
sample: [['...', '...'], ['...', '...']]
xml:
description: The raw XML reply from the device
returned: when format is xml
type: list
sample: [['...', '...'], ['...', '...']]
failed_conditionals:
description: the conditionals that failed
  returned: failed
type: list
sample: ['...', '...']
"""
import re
import shlex
import time
def split(value):
lex = shlex.shlex(value)
lex.quotes = '"'
lex.whitespace_split = True
lex.commenters = ''
return list(lex)
def rpc_args(args):
kwargs = dict()
args = split(args)
name = args.pop(0)
for arg in args:
key, value = arg.split('=')
        if str(value).upper() in ['TRUE', 'FALSE']:
            # bool('False') is True, so compare against the literal instead
            kwargs[key] = str(value).upper() == 'TRUE'
elif re.match(r'\d+', value):
kwargs[key] = int(value)
else:
kwargs[key] = str(value)
return (name, kwargs)
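# Illustrative parse (matches the EXAMPLES section above):
#   rpc_args('get_interface_information interface=em0 media=True')
#   -> ('get_interface_information', {'interface': 'em0', 'media': True})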
def parse_rpcs(rpcs):
parsed = list()
for rpc in (rpcs or list()):
parsed.append(rpc_args(rpc))
return parsed
def run_rpcs(module, items, format):
response = list()
for name, kwargs in items:
kwargs['format'] = format
result = module.connection.rpc(name, **kwargs)
if format == 'text':
response.append(result.text)
else:
response.append(result)
return response
def iterlines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def main():
"""main entry point for Ansible module
"""
spec = dict(
commands=dict(type='list'),
rpcs=dict(type='list'),
format=dict(default='xml', choices=['text', 'xml']),
waitfor=dict(type='list'),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int'),
transport=dict(default='netconf', choices=['netconf'])
)
mutually_exclusive = [('commands', 'rpcs')]
module = get_module(argument_spec=spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
commands = module.params['commands']
rpcs = parse_rpcs(module.params['rpcs'])
encoding = module.params['format']
retries = module.params['retries']
interval = module.params['interval']
try:
queue = set()
for entry in (module.params['waitfor'] or list()):
queue.add(Conditional(entry))
except AttributeError:
exc = get_exception()
module.fail_json(msg=exc.message)
result = dict(changed=False)
while retries > 0:
if commands:
response = module.run_commands(commands, format=encoding)
else:
response = run_rpcs(module, rpcs, format=encoding)
result['stdout'] = response
xmlout = list()
for index in range(0, len(response)):
if encoding == 'xml':
xmlout.append(xml_to_string(response[index]))
response[index] = xml_to_json(response[index])
for item in list(queue):
if item(response):
queue.remove(item)
if not queue:
break
time.sleep(interval)
retries -= 1
else:
failed_conditions = [item.raw for item in queue]
module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions)
if xmlout:
result['xml'] = xmlout
result['stdout_lines'] = list(iterlines(result['stdout']))
module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.junos import *
if __name__ == '__main__':
main()
| caseylucas/ansible-modules-core | network/junos/junos_command.py | Python | gpl-3.0 | 7,935 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import wiki.plugins.images.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wiki', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('revisionplugin_ptr', models.OneToOneField(to='wiki.RevisionPlugin', primary_key=True, auto_created=True, parent_link=True, serialize=False)),
],
options={
'verbose_name': 'image',
'verbose_name_plural': 'images',
},
bases=('wiki.revisionplugin',),
),
migrations.CreateModel(
name='ImageRevision',
fields=[
('revisionpluginrevision_ptr', models.OneToOneField(to='wiki.RevisionPluginRevision', primary_key=True, auto_created=True, parent_link=True, serialize=False)),
('image', models.ImageField(null=True, blank=True, height_field='height', max_length=2000, width_field='width', upload_to=wiki.plugins.images.models.upload_path)),
('width', models.SmallIntegerField(null=True, blank=True)),
('height', models.SmallIntegerField(null=True, blank=True)),
],
options={
'verbose_name': 'image revision',
'verbose_name_plural': 'image revisions',
'ordering': ('-created',),
},
bases=('wiki.revisionpluginrevision',),
),
]
| NablaWebkom/django-wiki | wiki/plugins/images/migrations/0001_initial.py | Python | gpl-3.0 | 1,567 |
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
args = parser.parse_args()
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
def get_refs():
refs = {}
for path in glob.glob(os.path.join(rootdir, "build/boilerplate/boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except:
return False
data = f.read()
f.close()
extension = file_extension(filename)
ref = refs[extension]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
return False
# Replace all occurrences of the regex "2016|2015|2014" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
extension = file_extension(pathname)
if extension in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile('YEAR')
# dates can be 2014, 2015 or 2016, company holder names can be anything
regexs["date"] = re.compile('(2014|2015|2016)')
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
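# Illustrative behaviour of the regexes above (doctest-style, for reference):
#   >>> get_regexs()["date"].subn("YEAR", "Copyright 2015 Google Inc.")
#   ('Copyright YEAR Google Inc.', 1)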
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
if __name__ == "__main__":
sys.exit(main())
| spacexnice/ctlplane | Godeps/_workspace/src/github.com/google/cadvisor/build/boilerplate/boilerplate.py | Python | mit | 4,734 |
#-------------------------------------------------------------------------------
#
# Copyright (c) 2006, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 07/04/2006
#
#-------------------------------------------------------------------------------
""" Implements the FeatureTool feature that allows a dragged object
implementing the IFeatureTool interface to be dropped onto any compatible
object.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from dock_window_feature \
import DockWindowFeature
from pyface.image_resource \
import ImageResource
#-------------------------------------------------------------------------------
# 'FeatureTool' class:
#-------------------------------------------------------------------------------
class FeatureTool ( DockWindowFeature ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
image = ImageResource( 'feature_tool' )
#---------------------------------------------------------------------------
# Returns whether a specified object can be dropped on the feature image:
#---------------------------------------------------------------------------
def can_drop ( self, object ):
""" Returns whether a specified object can be dropped on the feature
image.
"""
return True
| pankajp/pyface | pyface/dock/feature_tool.py | Python | bsd-3-clause | 1,947 |
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from sys import argv
from optparse import make_option
import csv
from time import time
from datetime import datetime
from upload.models import ImportItem
from django.template.loader import render_to_string
from django.conf import settings
from os import path
from pyExcelerator import Workbook, XFStyle, Alignment, Font
import logging
class Command(BaseCommand):
help = '''Import items to template price'''
option_list = BaseCommand.option_list + (
make_option('--verbose', default=None, dest='verbose', type='int',
help='Verbose level 0, 1 or 2 (0 by default)'),
make_option('--whole', default=False, dest='whole', type='string',
help='wholeprice'),
)
def handle(self, *args, **options):
start_time = time()
self.options = options
if self.options['verbose'] == 2:
logging.getLogger().setLevel(logging.DEBUG)
elif self.options['verbose'] == 1:
logging.getLogger().setLevel(logging.INFO)
elif self.options['verbose'] == 0:
logging.getLogger().setLevel(logging.ERROR)
last_item = ImportItem.objects.filter(ok=True).latest('date_loaded')
if not last_item:
raise CommandError('No import files')
src = last_item.file.path
class csv_format(csv.Dialect):
delimiter = ';'
quotechar = '"'
doublequote = True
lineterminator = '\r\n'
quoting = csv.QUOTE_MINIMAL
reader = csv.reader(open(src, 'r'), dialect=csv_format)
count = 0
if self.options['verbose'] >= 1:
logging.info('Importing items')
for item in reader:
try:
self.make_item(item)
except ValueError:
if self.options['verbose'] >= 2:
logging.error('Error importing record: %s' % item)
count = count + 1
self.write_html_price()
self.write_xls_price()
def __init__(self):
self.data = {}
def _get_or_create_section(self, name):
if name not in self.data:
self.data.update({name: []})
return name
def _create_item(self, **kwds):
if int(kwds['quantity']) > 0:
self.data[kwds['parent']].append(kwds)
def make_item(self, param_list):
'''
        Makes a new item in the catalog.
        param_list = [identifier, quantity, section_name, name,
                      wholesale_price, retail_price, barcode (optional)]
'''
options = {}
options['identifier'] = param_list[0]
options['quantity'] = param_list[1]
if self.options['whole']:
options['price'] = param_list[4]
else:
options['price'] = param_list[5]
if len(param_list) == 7:
options['barcode'] = param_list[6]
else:
options['barcode'] = None
options['name'] = param_list[3].decode('cp1251').replace('""', '"')
options['short_description'] = options['name'].split(' ').pop()
section_name = param_list[2].decode('cp1251')
section = self._get_or_create_section(section_name)
options['parent'] = section
return self._create_item(**options)
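    # Illustrative input row for make_item (values are hypothetical; columns
    # are identifier, quantity, section, name, wholesale price, retail price
    # and an optional barcode):
    #   ['A-1001', '5', 'Perfume', 'Some item 50ml', '10.00', '15.00', '4600000000000']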
def write_html_price(self):
content = render_to_string('catalog/price.html', {'sections': self.data})
if self.options['whole']:
filename = path.join(settings.MEDIA_ROOT, 'upload/wprice.html')
else:
filename = path.join(settings.MEDIA_ROOT, 'upload/price.html')
f = open(filename, 'w')
f.write(content.encode('utf-8'))
f.close()
def write_xls_price(self):
if self.options['whole']:
filename = path.join(settings.MEDIA_ROOT, 'upload/wprice.xls')
else:
filename = path.join(settings.MEDIA_ROOT, 'upload/price.xls')
workBookDocument = Workbook()
        docSheet = workBookDocument.add_sheet(u'Прайс соло-парфюм')  # sheet name: "Solo-Parfum price list"
docSheet.col(1).width = 10000
headerFont = Font()
headerFont.bold = True
headerFont.size = 400
headerStyle = XFStyle()
headerStyle.font = headerFont
docSheet.row(0).set_style(headerStyle)
        if self.options['whole']:
            docSheet.write_merge(0, 0, 0, 2, u'Оптовый прайс-лист Соло-парфюм (%s)' % datetime.now().strftime('%d.%m.%Y'))  # "Wholesale price list Solo-Parfum (<date>)"
        else:
            docSheet.write_merge(0, 0, 0, 2, u'Прайс-лист Соло-парфюм (%s)' % datetime.now().strftime('%d.%m.%Y'))  # "Price list Solo-Parfum (<date>)"
        docSheet.write(2, 0, u'Артикул')  # "SKU / article number"
        docSheet.write(2, 1, u'Наименование')  # "Name"
        docSheet.write(2, 2, u'Цена')  # "Price"
sectionFont = Font()
sectionFont.bold = True
sectionStyle = XFStyle()
sectionStyle.font = sectionFont
align = Alignment()
align.horz = Alignment.HORZ_CENTER
sectionStyle.alignment = align
row = 3
for section in self.data.iterkeys():
docSheet.write_merge(row, row, 0, 2, section, sectionStyle)
row += 1
for item in self.data[section]:
docSheet.write(row, 0, item['identifier'])
docSheet.write(row, 1, item['name'])
docSheet.write(row, 2, item['price'])
row += 1
workBookDocument.save(filename)
| redsolution/django-catalog | catalog/contrib/defaults/management/commands/makeprice.py | Python | gpl-3.0 | 5,502 |
# Michael Cohen <scudette@users.sourceforge.net>
# David Collett <daveco@users.sourceforge.net>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ******************************************************
""" This module provides support for compressed file formats such as Zip and Gzip.
The scanner recurses into zip files, executing the scanner factory train on files within the ZIP file.
Note that the scanner reads the central directory to recover
compressed file offsets within the zip file. The carver does not, and
finds zip file contents wherever they appear within the zip -
hopefully both return the same thing, but in the case of a multi-file
zip file the carver will work better than the scanner.
"""
import os.path,sys,posixpath
import pyflag.pyflaglog as pyflaglog
from pyflag.Scanner import *
import zipfile,gzip,tarfile, zlib
from pyflag.FileSystem import File
import pyflag.Magic as Magic
import time,re,os
import StringIO
import pyflag.Scanner as Scanner
import gzip
import pyflag.Store as Store
import FileFormats.Zip as Zip
import pyflag.DB as DB
import pyflag.Time as Time
class ZipScan(GenScanFactory):
""" Recurse into Zip Files """
order=99
default = True
group = "CompressedFile"
def scan(self, fd, scanners, type, mime, cookie, **args):
pass
class Scan:
types = (
'application/(x-)?zip',
)
def external_process(self,fd):
""" This is run on the extracted file """
pyflaglog.log(pyflaglog.VERBOSE_DEBUG, "Decompressing Zip File %s" % fd.inode)
cache_key = "%s:%s" % (self.case , self.fd.inode)
## Try to read the fd as a zip file
z = zipfile.ZipFile(fd)
pathname, inode, inode_id = self.ddfs.lookup(inode = self.inode)
## retrieve evidence timezone, this is necessary because zip files
## store time in localtime
evidence_tz = Time.get_evidence_tz_name(self.case, self.fd)
## List all the files in the zip file:
dircount = 0
inodes = []
namelist = z.namelist()
for i in range(len(namelist)):
## Add the file into the VFS
try:
## Convert the time to case timezone
t = Time.convert(z.infolist()[i].date_time, case=self.case, evidence_tz=evidence_tz)
except:
t=0
## If the entry corresponds to just a directory we ignore it.
if not posixpath.basename(namelist[i]): continue
info = z.infolist()[i]
inode = "%s|Z%s:%s" % (self.inode,info.header_offset, info.compress_size)
inodes.append(inode)
inode_id = self.ddfs.VFSCreate(None,
inode,DB.expand("%s/%s",(pathname,namelist[i])),
size=info.file_size,
mtime=t, _fast=True)
for inode in inodes:
## Now call the scanners on this new file (FIXME limit
## the recursion level here)
                fd = self.ddfs.open(inode=inode)
Scanner.scanfile(self.ddfs,fd,self.factories)
class GZScan(GenScanFactory):
""" Decompress Gzip files """
group = "CompressedFile"
class Drawer(Scanner.Drawer):
description = "Compressed file support"
name = "Compressed File"
group = "CompressedFile"
default = False
def find_gzipped_filename(self, fd, type):
        # The libmagic type string usually embeds the original name, e.g.
        # 'gzip compressed data, was "foo.txt", ...'
        match = re.search('was "([^"]+)"', type)
        if match:
            return match.group(1)
        if fd.urn.endswith(".gz"):
            return posixpath.basename(fd.urn)[:-3]
        return "Uncompressed"
## For gziped files, we convert them to Image stream to provide
## seekable access - This does duplicate data but otherwise we
## would need to use a temporary file anyway
def scan(self, fd, scanners, type, mime, cookie, **args):
if "gzip" in type:
new_path = "%s/%s" % (fd.urn, self.find_gzipped_filename(fd, type))
new_fd = CacheManager.AFF4_MANAGER.create_cache_fd(self.case, new_path)
gz = gzip.GzipFile(fileobj=fd, mode='r')
while 1:
data = gz.read(1024*1024)
if not data: break
new_fd.write(data)
new_fd.close()
## Now scan the new fd
Scanner.scan_inode(self.case, new_fd.inode_id,
scanners)
class Scan:
""" If we hit a gzip file, we just create a new Inode entry in the VFS """
types = (
'application/x-gzip' ,
)
def __init__(self, inode,ddfs,outer,factories=None,fd=None):
ScanIfType.__init__(self, inode,ddfs,outer,factories,fd=fd)
self.filename = None
def process(self, data, metadata=None):
ScanIfType.process(self,data,metadata)
if not self.boring_status and not self.filename:
## We need to find the name of the original uncompressed
## file so we can set a sensible VFS file name. This is
## the algorithm used:
## 1) We try to decompress the first data block from the file to see if the original name is in the header
## 2) Failing this we check if the inodes filename ends with .gz
## 3) Failing that, we call the new file "data"
m = Magic.MagicResolver()
magic, type_mime = m.find_inode_magic(self.case, inode_id=self.fd.inode_id,
data=data[:1024])
def finish(self):
if self.filename:
self.ddfs.VFSCreate(self.inode,"G0",self.filename)
new_inode="%s|G0" % (self.inode)
## Scan the new file using the scanner train:
fd=self.ddfs.open(inode=new_inode)
Scanner.scanfile(self.ddfs,fd,self.factories)
class TarScan(GenScanFactory):
""" Recurse into Tar Files """
order=99
default = True
depends = [ 'TypeScan' ]
group = 'CompressedFile'
def destroy(self):
pass
class Scan:
types = (
'application/x-tar',
)
def external_process(self,fd):
""" This is run on the extracted file """
#Get a TarFile object - We must access a complete file
#here
fd.cache()
tar=tarfile.TarFile(fileobj=fd)
## List all the files in the tar file:
inodes = []
dircount = 0
namelist = tar.getnames()
for i in range(len(namelist)):
## If the entry corresponds to just a directory we ignore it.
if not os.path.basename(namelist[i]): continue
## Add the file into the VFS
self.ddfs.VFSCreate(
self.inode,"T%s" % i,namelist[i],
size=tar.getmember(namelist[i]).size,
_mtime=tar.getmember(namelist[i]).mtime,
uid=tar.getmember(namelist[i]).uid,
gid=tar.getmember(namelist[i]).gid,
mode=oct(tar.getmember(namelist[i]).mode),
)
new_inode="%s|T%s" % (self.inode,i)
inodes.append(new_inode)
for inode in inodes:
## Scan the new file using the scanner train:
fd=self.ddfs.open(inode=inode)
Scanner.scanfile(self.ddfs,fd,self.factories)
ZIPCACHE = Store.Store(max_size=5)
## These are the corresponding VFS modules:
class ZipFile(File):
""" A file like object to read files from within zip files.
We essentially decompress the file on the disk because the file
may be exceptionally large.
"""
specifier = 'Z'
def __init__(self, case, fd, inode):
File.__init__(self, case, fd, inode)
## Make sure our parent is cached:
self.fd.cache()
## Parse out inode - if we got the compressed length provided,
## we use that, otherwise we calculate it from the zipfile
## header
parts = inode.split('|')
ourpart = parts[-1][1:]
try:
offset, size = ourpart.split(":")
self.compressed_length = int(size)
offset = int(offset)
except:
offset = int(ourpart)
self.offset = offset
## Ensure that we can read the file header:
b = Zip.Buffer(fd=fd)[offset:]
self.header = Zip.ZipFileHeader(b)
## This is sometimes invalid and set to zero - should we query
## the db?
self.size = int(self.header['uncompr_size'])
if not self.compressed_length:
self.compressed_length = int(self.header['compr_size'])
self.type = int(self.header['compression_method'])
## Where does the data start?
self.init()
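    # Illustrative inode suffixes accepted above (offsets are hypothetical):
    # "...|Z1234:567" reads the ZipFileHeader at byte 1234 with 567 compressed
    # bytes, while "...|Z1234" falls back to the compressed size recorded in
    # the local file header.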
def init(self):
self.d = zlib.decompressobj(-15)
self.left_over = ''
self.blocksize = 1024*10
self.clength = self.compressed_length
offset = self.header.buffer.offset + self.header.size()
## Seek our fd to there:
self.fd.seek(offset)
def read(self,length=None):
## Call our baseclass to see if we have cached data:
try:
return File.read(self,length)
except IOError:
pass
## Read as much as possible
if length==None:
length = sys.maxint
## This is done in order to decompress the file in small
## chunks. We try to return as much data as was required
## and not much more
try:
## Consume the data left over from previous reads
result = self.left_over[:length]
self.left_over=self.left_over[length:]
## We keep reading compressed data until we can satify
## the desired length
while len(result)<length and self.clength>0:
## Read up to 1k of the file:
available_clength = min(self.blocksize,self.clength)
cdata = self.fd.read(available_clength)
self.clength -= available_clength
if self.type == Zip.ZIP_DEFLATED:
## Now Decompress that:
try:
ddata = self.d.decompress(cdata)
except:
ddata = ''
elif self.type == Zip.ZIP_STORED:
ddata = cdata
else:
raise RuntimeError("Compression method %s is not supported" % self.type)
## How much data do we require?
required_length = length - len(result)
result += ddata[:required_length]
## This will be '' if we have not finished making
## up the result, and store the rest for next time
## if we have
self.left_over = ddata[required_length:]
except (IndexError, KeyError, zipfile.BadZipfile),e:
raise IOError("Zip_File: (%s)" % e)
self.readptr += len(result)
return result
def seek(self, offset, rel=None):
File.seek(self,offset,rel)
if self.cached_fd: return
if self.readptr == 0:
self.init()
## We want to reinitialise the file pointer:
elif self.readptr!=0 and self.type == Zip.ZIP_DEFLATED:
pyflaglog.log(pyflaglog.VERBOSE_DEBUG, "Required to seek to offset %s in Zip File %s (%s,%s). This is inefficient, forcing disk caching." % (self.readptr, self.inode, offset,rel))
self.init()
self.cache()
self.seek(offset, rel)
return
def explain(self, query, result):
self.fd.explain(query, result)
result.row("Zip File", "Decompress ZipFileHeader structure at "
"offset %s with length %s" % (self.offset, self.compressed_length))
result.row("","Filename - %s" % self.header['zip_path'])
### FIXME - This is dangerous!!! We decompress into memory
class GZ_file(File):
""" A file like object to read gzipped files. """
specifier = 'G'
def __init__(self, case, fd, inode):
File.__init__(self, case, fd, inode)
self.gz = None
def read(self, length=None):
try:
return File.read(self,length)
except IOError:
pass
if not self.gz:
self.fd.seek(0)
self.gz = gzip.GzipFile(fileobj=self.fd, mode='r')
count = 0
step = 1024
result = ''
## Copy ourself into the file - This is in case we have errors
## in the file, we try to read as much as possible:
while 1:
try:
data=self.gz.read(step)
except IOError,e:
step /= 2
if step<10:
pyflaglog.log(pyflaglog.DEBUG, "Error reading from %s(%s), could only get %s bytes (wanted %s/%s)" % (self.fd.inode, e, count, length,self.size));
break
else:
continue
except Exception, e:
#pyflaglog.log(pyflaglog.WARNING, "Unable to decompress inode (%s) %s" % (self.inode, e))
break
count += len(data)
if len(data)==0: break
result+=data
return result
def seek(self,offset,rel=None):
File.seek(self,offset,rel)
if self.cached_fd: return
## If we were asked to seek in a gzip file:
if self.readptr!=0:
pyflaglog.log(pyflaglog.VERBOSE_DEBUG,"Asked to seek to %s in gzip file %s. This is expensive, caching on disk." % (self.readptr, self.inode))
self.cache()
self.seek(offset,rel)
## Force a new decompressor when rereading:
self.gz = None
def explain(self, query, result):
self.fd.explain(query, result)
result.row("Gzip File", "Use Gzip algorithm to decompress %s" % self.fd.inode)
class DeflateFile(GZ_file):
""" A File like object to read deflated files """
specifier = "d"
def read(self, length=None):
try:
return File.read(self,length)
except IOError:
pass
if not self.gz:
self.fd.seek(0)
self.gz = gzip.zlib.decompressobj(-15)
count = 0
step = 1024
result = ''
## Copy ourself into the file - This is in case we have errors
## in the file, we try to read as much as possible:
while 1:
try:
data=self.gz.decompress(self.fd.read(step))
except IOError,e:
step /= 2
if step<10:
pyflaglog.log(pyflaglog.DEBUG, "Error reading from %s, could only get %s bytes" % (self.fd.inode, count));
break
else:
continue
except Exception, e:
pyflaglog.log(pyflaglog.WARNING, "Unable to decompress inode %s" % e)
break
count += len(data)
if len(data)==0: break
result+=data
return result
def explain(self, query, result):
self.fd.explain(query, result)
result.row("Gzip File", "Use deflate algorithm to decompress %s" % self.fd.inode)
class Tar_file(File):
""" A file like object to read files from within tar files. Note that the tar file is specified as an inode in the DBFS """
specifier = 'T'
def __init__(self, case, fd, inode):
File.__init__(self, case, fd, inode)
## Tar file handling requires repeated access into the tar
## file. Caching our input fd really helps to speed things
## up...
fd.cache()
# strategy:
# inode is the index into the namelist of the tar file (i hope this is consistant!!)
# just read that file!
parts = inode.split('|')
try:
t = ZIPCACHE.get(self.fd.inode)
except (AttributeError, KeyError):
try:
t = tarfile.TarFile(name='/', fileobj=fd)
ZIPCACHE.put(t, key=self.fd.inode)
except tarfile.CompressionError,e:
raise IOError("Tar file: %s" % e)
try:
name=t.getnames()[int(parts[-1][1:])]
self.data = t.extractfile(name).read()
except (IndexError, KeyError):
raise IOError, "Tar_File: cant find index"
self.readptr=0
self.size=t.getmember(name).size
def read(self,len=None):
## Call our baseclass to see if we have cached data:
try:
return File.read(self,len)
except IOError:
pass
if len:
temp=self.data[self.readptr:self.readptr+len]
self.readptr+=len
return temp
else: return self.data
def close(self):
pass
## These are characters which are invalid for a file name
# invalid_filename = re.compile('[^a-zA-Z0-9!@#$%^&()_+-=*{}\\|]')
# class ZipFileCarver(Scanner.Carver):
# """ This is a special carver for zip files """
# regexs = ['PK\x03\x04']
# def add_inode(self, fd, offset, factories):
# """ We think we have a zip file here. """
# b = Zip.Buffer(fd=fd)[offset:]
# try:
# header = Zip.ZipFileHeader(b)
# size = int(header['uncompr_size'])
# compressed_length = int(header['compr_size'])
# ## Some zip programs seem to leave this at 0 - because its
# ## already in the central directory. Unfortunately the
# ## carver currently does not look at the central directory
# ## - so we just make it a reasonable value
# if compressed_length==0:
# compressed_length = 100*1024
# name = header['zip_path'].get_value()
# if len(name)==0 or invalid_filename.search(name):
# pyflaglog.log(pyflaglog.DEBUG, "Thought the name %r is invalid - skipping file" % name[:10])
# return 10
# header_offset = header['data'].buffer.offset
# except:
# return 10
# new_inode = "%s|Z%s:%s" % (fd.inode, offset, compressed_length)
# self._add_inode(new_inode, size, name, fd, factories)
# return size
## UnitTests:
import unittest
import pyflag.pyflagsh as pyflagsh
import pyflag.tests
# class ZipScanTest(pyflag.tests.ScannerTest):
# """ Zip File handling Tests """
# test_file = "pyflag_stdimage_0.4.e01"
# subsystem = 'EWF'
# offset = "16128s"
# def test_type_scan(self):
# """ Check the Zip scanner works """
# dbh = DB.DBO(self.test_case)
# env = pyflagsh.environment(case=self.test_case)
# pyflagsh.shell_execv(env=env, command="scan",
# argv=["*",'ZipScan','GZScan','TarScan','TypeScan'])
# dbh.execute("select count(*) as count from inode where inode like '%%|Z%%'")
# count = dbh.fetch()['count']
# self.failIf(count==0, "Could not find any zip files?")
# dbh.execute("select count(*) as count from inode where inode like '%%|G0'")
# count = dbh.fetch()['count']
# self.failIf(count==0, "Could not find any gzip files?")
# ## FIXME: No tar files in the test image
# #dbh.execute("select count(*) as count from inode where inode like '%|T%'")
# #count = dbh.fetch()['count']
# #self.failIf(count==0, "Could not find any tar files?")
import pyflag.tests
class ZipScanTest2(pyflag.tests.ScannerTest):
""" Test handling of zip bombs """
test_case = "PyFlagTestCase"
test_file = "gmail_cache.zip"
def test01RunScanner(self):
""" Test Zip scanner """
env = pyflagsh.environment(case=self.test_case)
pyflagsh.shell_execv(env=env, command="scan",
argv=["*",'ZipScan'])
| backupManager/pyflag | src/plugins/DiskForensics/FileHandlers/ZipFile.py | Python | gpl-2.0 | 21,412 |
'''
Kendrick Server
The Kendrick HTTP server is built on Tornado. It serves two purposes:
=> To serve static files directly, acting exactly like a connection object.
=> To act as a "proxy" that exposes a particular URL which simply maps to the
   real URL defined by the connection.
'''
import tornado.ioloop
import tornado.web
import threading
import urllib
from lib.rwlock import RWLock
from requests.packages import urllib3
server_lock = RWLock()
mapping = {}
class ImageProxyHandler(tornado.web.RequestHandler):
def get(self):
connection = self.get_argument("c")
title = self.get_argument("t")
server_lock.reader_acquire()
dat_arr = mapping[title]
server_lock.reader_release()
        html_file = urllib.urlopen(dat_arr[connection])
        # RequestHandler.write() expects a str, not a file object
        self.write(html_file.read())
self.flush()
class KendrickServer(tornado.web.Application):
def __init__(self):
        super(KendrickServer, self).__init__([
(r"/image", ImageProxyHandler)
])
def addToMapping(self, webpage):
mapping[webpage.title] = 0
def removeFromMapping(self, webpage):
del mapping[webpage.title]
def updateMapping(self, webpage, urls):
server_lock.writer_acquire()
mapping[webpage.title] = urls
server_lock.writer_release()
def start(self):
self.listen(8888)
tornado.ioloop.IOLoop.instance().start() | broadmarkio/kendrick | kendrick/server.py | Python | gpl-2.0 | 1,564 |
#!/usr/bin/env python
class SplitResultResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'Result': 'SplitResult',
'Code': 'str',
'Status': 'str'
}
self.attributeMap = {
'Result': 'Result','Code': 'Code','Status': 'Status'}
self.Result = None # SplitResult
self.Code = None # str
self.Status = None # str
| sohail-aspose/Aspose_Pdf_Cloud | SDKs/Aspose.Pdf_Cloud_SDK_for_Python/asposepdfcloud/models/SplitResultResponse.py | Python | mit | 788 |
"""
HTTP server that responses with delays used for tests.
Example usage:
python tests/slow_server.py [HOST:PORT]
- run HTTP Server, HOST and PORT are optional
python tests/slow_server.py [HOST:PORT] True
- run IMMORTAL server (stopping process only by SIGKILL)
"""
import ast
import sys
import os
import time
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs
sys.path.append(os.getcwd()) # noqa
# pylint:disable=wrong-import-position
from tests.signals import block_signals
# pylint:enable=wrong-import-position
class SlowServerHandler(BaseHTTPRequestHandler):
"""Slow server handler."""
timeout = 2
endtime = None
def do_GET(self) -> None: # pylint:disable=invalid-name
"""Serve GET request."""
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b"Hi. I am very slow.")
def do_HEAD(self) -> None: # pylint:disable=invalid-name
"""
Serve HEAD request.
but count to wait and return 500 response if wait time not exceeded
due to the fact that HTTPServer will hang waiting for response
to return otherwise if none response will be returned.
"""
self.timeout_status()
self.end_headers()
def timeout_status(self) -> None:
"""Set proper response status based on timeout."""
if self.count_timeout():
self.send_response(200)
else:
self.send_response(500)
def count_timeout(self) -> bool: # pylint: disable=no-self-use
"""Count down the timeout time."""
if SlowServerHandler.endtime is None:
SlowServerHandler.endtime = time.time() + SlowServerHandler.timeout
return time.time() >= SlowServerHandler.endtime
class SlowGetServerHandler(SlowServerHandler):
"""Responds only on GET after a while."""
def do_GET(self) -> None: # pylint:disable=invalid-name
"Serve GET request."
self.timeout_status()
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b"Hi. I am very slow.")
def do_HEAD(self) -> None: # pylint:disable=invalid-name
"Serve HEAD request."
self.send_response(500)
self.end_headers()
class SlowPostServerHandler(SlowServerHandler):
"""Responds only on POST after a while."""
def do_POST(self) -> None: # pylint:disable=invalid-name
"Serve POST request."
self.timeout_status()
self.end_headers()
self.wfile.write(b"Hi. I am very slow.")
def do_HEAD(self) -> None: # pylint:disable=invalid-name
"Serve HEAD request."
self.send_response(500)
self.end_headers()
class SlowPostKeyServerHandler(SlowServerHandler):
"""Responds only on POST after a while."""
def do_POST(self) -> None: # pylint:disable=invalid-name
"Serve POST request."
content_len = int(self.headers.get("Content-Length"))
post_body = self.rfile.read(content_len)
form = parse_qs(post_body)
if form.get(b"key") == [b"hole"]:
self.timeout_status()
else:
self.send_response(500)
self.end_headers()
self.wfile.write(b"Hi. I am very slow.")
def do_HEAD(self) -> None: # pylint:disable=invalid-name
"Serve HEAD request."
self.send_response(500)
self.end_headers()
HANDLERS = {
"HEAD": SlowServerHandler,
"GET": SlowGetServerHandler,
"POST": SlowPostServerHandler,
"Key": SlowPostKeyServerHandler,
}
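# Illustrative invocations (HOST:PORT, IMMORTAL and METHOD parsing below):
#   python tests/slow_server.py 127.0.0.1:8000            # slow HEAD responder
#   python tests/slow_server.py 127.0.0.1:8000 True POST  # immortal, POST-based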
if __name__ == "__main__":
HOST, PORT, IMMORTAL, METHOD = "127.0.0.1", "8000", "False", "HEAD"
if len(sys.argv) >= 2:
HOST, PORT = sys.argv[1].split(":")
if len(sys.argv) >= 3:
IMMORTAL = sys.argv[2]
if len(sys.argv) == 4:
METHOD = sys.argv[3]
if ast.literal_eval(IMMORTAL):
block_signals()
server = HTTPServer(
(HOST, int(PORT)), HANDLERS[METHOD]
) # pylint: disable=invalid-name
print(f"Starting slow server on {HOST}:{PORT}...")
server.serve_forever()
| ClearcodeHQ/mirakuru | tests/server_for_tests.py | Python | lgpl-3.0 | 4,189 |
import argparse
import pandas
parser = argparse.ArgumentParser(description="Subsets a checkm tab-separated outfile to include only entries that have the specified completeness/contamination level")
parser.add_argument("-checkm", help="the checkm out file", required=True)
parser.add_argument("-completeness", help="completeness value")
parser.add_argument("-comp_metric", help="the comparison to completeness to select [%(default)s]", choices=["<", "<=", "=", ">=", ">"], default=">=")
parser.add_argument("-contamination", help="contamination value")
parser.add_argument("-cont_metric", help="the comparison to contamination to select [%(default)s]", choices=["<", "<=", "=", ">=", ">"], default="<=")
parser.add_argument("-out", help="the output checkm tsv output [%(default)s]", default="checkm_subset.tsv")
args = parser.parse_args()
df = pandas.read_csv(args.checkm, sep="\t", header=0, index_col=0)
if args.completeness:
df = df.query("Completeness {metric} {value}".format(metric=args.comp_metric, value=args.completeness))
if args.contamination:
df = df.query("Contamination {metric} {value}".format(metric=args.cont_metric, value=args.contamination))
df = df.sort_values('Completeness', ascending=False)
df.to_csv(args.out, sep="\t", index_label="Bin Id")
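# Example invocation (hypothetical file names; a usage sketch, not executed here):
#   python checkm_select_bins.py -checkm checkm_qa.tsv -completeness 90 -contamination 5 -out checkm_subset.tsv
# keeps only bins with Completeness >= 90 and Contamination <= 5, sorted by
# Completeness in descending order.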
| hunter-cameron/Bioinformatics | python/checkm_select_bins.py | Python | mit | 1,283 |
#!/usr/bin/python
# (C) Copyright 2004
# BEC Systems <http://bec-systems.com>
# Cliff Brake <cliff.brake@gmail.com>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
# calculations for PXA255 registers
class gpio:
dir = '0'
set = '0'
clr = '0'
alt = '0'
desc = ''
def __init__(self, dir=0, set=0, clr=0, alt=0, desc=''):
self.dir = dir
self.set = set
self.clr = clr
self.alt = alt
self.desc = desc
# the following is a dictionary of all GPIOs in the system
# the key is the GPIO number
pxa255_alt_func = {
0: ['gpio', 'none', 'none', 'none'],
1: ['gpio', 'gpio reset', 'none', 'none'],
2: ['gpio', 'none', 'none', 'none'],
3: ['gpio', 'none', 'none', 'none'],
4: ['gpio', 'none', 'none', 'none'],
5: ['gpio', 'none', 'none', 'none'],
6: ['gpio', 'MMC clk', 'none', 'none'],
7: ['gpio', '48MHz clock', 'none', 'none'],
8: ['gpio', 'MMC CS0', 'none', 'none'],
9: ['gpio', 'MMC CS1', 'none', 'none'],
10: ['gpio', 'RTC Clock', 'none', 'none'],
11: ['gpio', '3.6MHz', 'none', 'none'],
12: ['gpio', '32KHz', 'none', 'none'],
13: ['gpio', 'none', 'MBGNT', 'none'],
14: ['gpio', 'MBREQ', 'none', 'none'],
15: ['gpio', 'none', 'nCS_1', 'none'],
16: ['gpio', 'none', 'PWM0', 'none'],
17: ['gpio', 'none', 'PWM1', 'none'],
18: ['gpio', 'RDY', 'none', 'none'],
19: ['gpio', 'DREQ[1]', 'none', 'none'],
20: ['gpio', 'DREQ[0]', 'none', 'none'],
21: ['gpio', 'none', 'none', 'none'],
22: ['gpio', 'none', 'none', 'none'],
23: ['gpio', 'none', 'SSP SCLK', 'none'],
24: ['gpio', 'none', 'SSP SFRM', 'none'],
25: ['gpio', 'none', 'SSP TXD', 'none'],
26: ['gpio', 'SSP RXD', 'none', 'none'],
27: ['gpio', 'SSP EXTCLK', 'none', 'none'],
28: ['gpio', 'AC97 bitclk in, I2S bitclock out', 'I2S bitclock in', 'none'],
29: ['gpio', 'AC97 SDATA_IN0', 'I2S SDATA_IN', 'none'],
30: ['gpio', 'I2S SDATA_OUT', 'AC97 SDATA_OUT', 'none'],
31: ['gpio', 'I2S SYNC', 'AC97 SYNC', 'none'],
32: ['gpio', 'AC97 SDATA_IN1', 'I2S SYSCLK', 'none'],
33: ['gpio', 'none', 'nCS_5', 'none'],
34: ['gpio', 'FF RXD', 'MMC CS0', 'none'],
35: ['gpio', 'FF CTS', 'none', 'none'],
36: ['gpio', 'FF DCD', 'none', 'none'],
37: ['gpio', 'FF DSR', 'none', 'none'],
38: ['gpio', 'FF RI', 'none', 'none'],
39: ['gpio', 'MMC CS1', 'FF TXD', 'none'],
40: ['gpio', 'none', 'FF DTR', 'none'],
41: ['gpio', 'none', 'FF RTS', 'none'],
42: ['gpio', 'BT RXD', 'none', 'HW RXD'],
43: ['gpio', 'none', 'BT TXD', 'HW TXD'],
44: ['gpio', 'BT CTS', 'none', 'HW CTS'],
45: ['gpio', 'none', 'BT RTS', 'HW RTS'],
46: ['gpio', 'ICP_RXD', 'STD RXD', 'none'],
47: ['gpio', 'STD TXD', 'ICP_TXD', 'none'],
48: ['gpio', 'HW TXD', 'nPOE', 'none'],
49: ['gpio', 'HW RXD', 'nPWE', 'none'],
50: ['gpio', 'HW CTS', 'nPIOR', 'none'],
51: ['gpio', 'nPIOW', 'HW RTS', 'none'],
52: ['gpio', 'none', 'nPCE[1]', 'none'],
53: ['gpio', 'MMC CLK', 'nPCE[2]', 'none'],
54: ['gpio', 'MMC CLK', 'nPSKSEL', 'none'],
55: ['gpio', 'none', 'nPREG', 'none'],
56: ['gpio', 'nPWAIT', 'none', 'none'],
57: ['gpio', 'nIOIS16', 'none', 'none'],
58: ['gpio', 'none', 'LDD[0]', 'none'],
59: ['gpio', 'none', 'LDD[1]', 'none'],
60: ['gpio', 'none', 'LDD[2]', 'none'],
61: ['gpio', 'none', 'LDD[3]', 'none'],
62: ['gpio', 'none', 'LDD[4]', 'none'],
63: ['gpio', 'none', 'LDD[5]', 'none'],
64: ['gpio', 'none', 'LDD[6]', 'none'],
65: ['gpio', 'none', 'LDD[7]', 'none'],
66: ['gpio', 'MBREQ', 'LDD[8]', 'none'],
67: ['gpio', 'MMC CS0', 'LDD[9]', 'none'],
68: ['gpio', 'MMC CS1', 'LDD[10]', 'none'],
69: ['gpio', 'MMC CLK', 'LDD[11]', 'none'],
70: ['gpio', 'RTC CLK', 'LDD[12]', 'none'],
71: ['gpio', '3.6 MHz', 'LDD[13]', 'none'],
72: ['gpio', '32 KHz', 'LDD[14]', 'none'],
73: ['gpio', 'MBGNT', 'LDD[15]', 'none'],
74: ['gpio', 'none', 'LCD_FCLK', 'none'],
75: ['gpio', 'none', 'LCD_LCLK', 'none'],
76: ['gpio', 'none', 'LCD_PCLK', 'none'],
77: ['gpio', 'none', 'LCD_ACBIAS', 'none'],
78: ['gpio', 'none', 'nCS_2', 'none'],
79: ['gpio', 'none', 'nCS_3', 'none'],
80: ['gpio', 'none', 'nCS_4', 'none'],
81: ['gpio', 'NSSPSCLK', 'none', 'none'],
82: ['gpio', 'NSSPSFRM', 'none', 'none'],
83: ['gpio', 'NSSPTXD', 'NSSPRXD', 'none'],
84: ['gpio', 'NSSPTXD', 'NSSPRXD', 'none'],
}
#def __init__(self, dir=0, set=0, clr=0, alt=0, desc=''):
gpio_list = []
for i in range(0,85):
gpio_list.append(gpio())
#chip select GPIOs
gpio_list[18] = gpio(0, 0, 0, 1, 'RDY')
gpio_list[33] = gpio(1, 1, 0, 2, 'CS5#')
gpio_list[80] = gpio(1, 1, 0, 2, 'CS4#')
gpio_list[79] = gpio(1, 1, 0, 2, 'CS3#')
gpio_list[78] = gpio(1, 1, 0, 2, 'CS2#')
gpio_list[15] = gpio(1, 1, 0, 2, 'CS1#')
gpio_list[22] = gpio(0, 0, 0, 0, 'Consumer IR, PCC_S1_IRQ_O#')
gpio_list[21] = gpio(0, 0, 0, 0, 'IRQ_IDE, PFI')
gpio_list[19] = gpio(0, 0, 0, 0, 'XB_DREQ1, PCC_SO_IRQ_O#')
gpio_list[20] = gpio(0, 0, 0, 0, 'XB_DREQ0')
gpio_list[17] = gpio(0, 0, 0, 0, 'IRQ_AXB')
gpio_list[16] = gpio(1, 0, 0, 2, 'PWM0')
# PCMCIA stuff
gpio_list[57] = gpio(0, 0, 0, 1, 'PCC_IOIS16#')
gpio_list[56] = gpio(0, 0, 0, 1, 'PCC_WAIT#')
gpio_list[55] = gpio(1, 0, 0, 2, 'PCC_REG#')
gpio_list[54] = gpio(1, 0, 0, 2, 'PCC_SCKSEL')
gpio_list[53] = gpio(1, 1, 0, 2, 'PCC_CE2#')
gpio_list[52] = gpio(1, 1, 0, 2, 'PCC_CE1#')
gpio_list[51] = gpio(1, 1, 0, 1, 'PCC_IOW#')
gpio_list[50] = gpio(1, 1, 0, 2, 'PCC_IOR#')
gpio_list[49] = gpio(1, 1, 0, 2, 'PCC_WE#')
gpio_list[48] = gpio(1, 1, 0, 2, 'PCC_OE#')
# SSP port
gpio_list[26] = gpio(0, 0, 0, 1, 'SSP_RXD')
gpio_list[25] = gpio(0, 0, 0, 0, 'SSP_TXD')
gpio_list[24] = gpio(1, 0, 1, 2, 'SSP_SFRM')
gpio_list[23] = gpio(1, 0, 1, 2, 'SSP_SCLK')
gpio_list[27] = gpio(0, 0, 0, 0, 'SSP_EXTCLK')
# audio codec
gpio_list[32] = gpio(0, 0, 0, 0, 'AUD_SDIN1')
gpio_list[31] = gpio(1, 0, 0, 2, 'AC_SYNC')
gpio_list[30] = gpio(1, 0, 0, 2, 'AC_SDOUT')
gpio_list[29] = gpio(0, 0, 0, 1, 'AUD_SDIN0')
gpio_list[28] = gpio(0, 0, 0, 1, 'AC_BITCLK')
# serial ports
gpio_list[39] = gpio(1, 0, 0, 2, 'FF_TXD')
gpio_list[34] = gpio(0, 0, 0, 1, 'FF_RXD')
gpio_list[41] = gpio(1, 0, 0, 2, 'FF_RTS')
gpio_list[35] = gpio(0, 0, 0, 1, 'FF_CTS')
gpio_list[40] = gpio(1, 0, 0, 2, 'FF_DTR')
gpio_list[37] = gpio(0, 0, 0, 1, 'FF_DSR')
gpio_list[38] = gpio(0, 0, 0, 1, 'FF_RI')
gpio_list[36] = gpio(0, 0, 0, 1, 'FF_DCD')
gpio_list[43] = gpio(1, 0, 0, 2, 'BT_TXD')
gpio_list[42] = gpio(0, 0, 0, 1, 'BT_RXD')
gpio_list[45] = gpio(1, 0, 0, 2, 'BT_RTS')
gpio_list[44] = gpio(0, 0, 0, 1, 'BT_CTS')
gpio_list[47] = gpio(1, 0, 0, 1, 'IR_TXD')
gpio_list[46] = gpio(0, 0, 0, 2, 'IR_RXD')
# misc GPIO signals
gpio_list[14] = gpio(0, 0, 0, 0, 'MBREQ')
gpio_list[13] = gpio(0, 0, 0, 0, 'MBGNT')
gpio_list[12] = gpio(0, 0, 0, 0, 'GPIO_12/32K_CLK')
gpio_list[11] = gpio(0, 0, 0, 0, '3M6_CLK')
gpio_list[10] = gpio(1, 0, 1, 0, 'GPIO_10/RTC_CLK/debug LED')
gpio_list[9] = gpio(0, 0, 0, 0, 'MMC_CD#')
gpio_list[8] = gpio(0, 0, 0, 0, 'PCC_S1_CD#')
gpio_list[7] = gpio(0, 0, 0, 0, 'PCC_S0_CD#')
gpio_list[6] = gpio(1, 0, 0, 1, 'MMC_CLK')
gpio_list[5] = gpio(0, 0, 0, 0, 'IRQ_TOUCH#')
gpio_list[4] = gpio(0, 0, 0, 0, 'IRQ_ETH')
gpio_list[3] = gpio(0, 0, 0, 0, 'MQ_IRQ#')
gpio_list[2] = gpio(0, 0, 0, 0, 'BAT_DATA')
gpio_list[1] = gpio(0, 0, 0, 1, 'USER_RESET#')
gpio_list[0] = gpio(0, 0, 0, 1, 'USER_RESET#')
# LCD GPIOs
gpio_list[58] = gpio(1, 0, 0, 2, 'LDD0')
gpio_list[59] = gpio(1, 0, 0, 2, 'LDD1')
gpio_list[60] = gpio(1, 0, 0, 2, 'LDD2')
gpio_list[61] = gpio(1, 0, 0, 2, 'LDD3')
gpio_list[62] = gpio(1, 0, 0, 2, 'LDD4')
gpio_list[63] = gpio(1, 0, 0, 2, 'LDD5')
gpio_list[64] = gpio(1, 0, 0, 2, 'LDD6')
gpio_list[65] = gpio(1, 0, 0, 2, 'LDD7')
gpio_list[66] = gpio(1, 0, 0, 2, 'LDD8')
gpio_list[67] = gpio(1, 0, 0, 2, 'LDD9')
gpio_list[68] = gpio(1, 0, 0, 2, 'LDD10')
gpio_list[69] = gpio(1, 0, 0, 2, 'LDD11')
gpio_list[70] = gpio(1, 0, 0, 2, 'LDD12')
gpio_list[71] = gpio(1, 0, 0, 2, 'LDD13')
gpio_list[72] = gpio(1, 0, 0, 2, 'LDD14')
gpio_list[73] = gpio(1, 0, 0, 2, 'LDD15')
gpio_list[74] = gpio(1, 0, 0, 2, 'FCLK')
gpio_list[75] = gpio(1, 0, 0, 2, 'LCLK')
gpio_list[76] = gpio(1, 0, 0, 2, 'PCLK')
gpio_list[77] = gpio(1, 0, 0, 2, 'ACBIAS')
# calculate registers
pxa_regs = {
'gpdr0':0, 'gpdr1':0, 'gpdr2':0,
'gpsr0':0, 'gpsr1':0, 'gpsr2':0,
'gpcr0':0, 'gpcr1':0, 'gpcr2':0,
'gafr0_l':0, 'gafr0_u':0,
'gafr1_l':0, 'gafr1_u':0,
'gafr2_l':0, 'gafr2_u':0,
}
# U-boot define names
uboot_reg_names = {
'gpdr0':'CFG_GPDR0_VAL', 'gpdr1':'CFG_GPDR1_VAL', 'gpdr2':'CFG_GPDR2_VAL',
'gpsr0':'CFG_GPSR0_VAL', 'gpsr1':'CFG_GPSR1_VAL', 'gpsr2':'CFG_GPSR2_VAL',
'gpcr0':'CFG_GPCR0_VAL', 'gpcr1':'CFG_GPCR1_VAL', 'gpcr2':'CFG_GPCR2_VAL',
'gafr0_l':'CFG_GAFR0_L_VAL', 'gafr0_u':'CFG_GAFR0_U_VAL',
'gafr1_l':'CFG_GAFR1_L_VAL', 'gafr1_u':'CFG_GAFR1_U_VAL',
'gafr2_l':'CFG_GAFR2_L_VAL', 'gafr2_u':'CFG_GAFR2_U_VAL',
}
# bit mappings
bit_mappings = [
{ 'gpio':(0,32), 'shift':1, 'regs':{'dir':'gpdr0', 'set':'gpsr0', 'clr':'gpcr0'} },
{ 'gpio':(32,64), 'shift':1, 'regs':{'dir':'gpdr1', 'set':'gpsr1', 'clr':'gpcr1'} },
{ 'gpio':(64,85), 'shift':1, 'regs':{'dir':'gpdr2', 'set':'gpsr2', 'clr':'gpcr2'} },
{ 'gpio':(0,16), 'shift':2, 'regs':{'alt':'gafr0_l'} },
{ 'gpio':(16,32), 'shift':2, 'regs':{'alt':'gafr0_u'} },
{ 'gpio':(32,48), 'shift':2, 'regs':{'alt':'gafr1_l'} },
{ 'gpio':(48,64), 'shift':2, 'regs':{'alt':'gafr1_u'} },
{ 'gpio':(64,80), 'shift':2, 'regs':{'alt':'gafr2_l'} },
{ 'gpio':(80,85), 'shift':2, 'regs':{'alt':'gafr2_u'} },
]
def stuff_bits(bit_mapping, gpio_list):
gpios = range( bit_mapping['gpio'][0], bit_mapping['gpio'][1])
for gpio in gpios:
for reg in bit_mapping['regs'].keys():
value = eval( 'gpio_list[gpio].%s' % (reg) )
if ( value ):
# we have a high bit
bit_shift = (gpio - bit_mapping['gpio'][0]) * bit_mapping['shift']
bit = value << (bit_shift)
pxa_regs[bit_mapping['regs'][reg]] |= bit
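# Worked example (illustrative): GPIO 33 is configured above as gpio(1, 1, 0, 2, 'CS5#').
# stuff_bits() therefore ORs bit (33 - 32) = 1 into gpdr1 and gpsr1 (value 0x2 each)
# and ORs the alternate-function value 2, shifted left by 2 bits, into gafr1_l
# (value 0x8).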
for i in bit_mappings:
stuff_bits(i, gpio_list)
# now print out all regs
registers = pxa_regs.keys()
registers.sort()
for reg in registers:
print '%s: 0x%x' % (reg, pxa_regs[reg])
# print define to past right into U-Boot source code
print
print
for reg in registers:
print '#define %s 0x%x' % (uboot_reg_names[reg], pxa_regs[reg])
# print all GPIOS
print
print
for i in range(len(gpio_list)):
gpio_i = gpio_list[i]
alt_func_desc = pxa255_alt_func[i][gpio_i.alt]
print 'GPIO: %i, dir=%i, set=%i, clr=%i, alt=%s, desc=%s' % (i, gpio_i.dir, gpio_i.set, gpio_i.clr, alt_func_desc, gpio_i.desc)
| daydaygit/flrelse | uboot1.1.6/board/pxa255_idp/pxa_reg_calcs.py | Python | gpl-3.0 | 11,003 |
from __future__ import print_function, division, absolute_import
import functools
import os
import sys
import warnings
# ---------------------------------------------------------------------
# Simple File Read and Store Utilities
# ---------------------------------------------------------------------
def saveToFile(filePath, str):
""" Save string to file.
see also: :func:`readFromFile`
:param filePath: file path to save to
:param str: string to save
"""
with open(filePath, 'w') as f:
f.write(str)
def readFromFile(filePath):
""" Load a file and return contents as a string.
see also: :func:`saveToFile`
:param filePath: file path to read from
:returns: string representation of the contents of the file
"""
with open(filePath, 'r', encoding="utf8") as f:
string = f.read()
return string
def listFiles (wildcardstr):
    """ List the file names in the current directory that match the wildcard argument
    eg te.listFiles ('*.xml')
    :param wildcardstr: wildcard used during the file search
:returns: list of file names that match the wildcard
"""
import glob
return glob.glob (wildcardstr)
# ---------------------------------------------------------------------
# Deprecated warning
# ---------------------------------------------------------------------
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.warn_explicit(
"Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
            filename=func.__code__.co_filename,
            lineno=func.__code__.co_firstlineno + 1
)
return func(*args, **kwargs)
return new_func
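# Minimal usage sketch for the decorator (hypothetical function name):
#
#   @deprecated
#   def oldRateLaw():
#       pass
#
# Calling oldRateLaw() then emits a DeprecationWarning pointing at its definition.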
# ---------------------------------------------------------------------
# Running external tools
# ---------------------------------------------------------------------
def runTool (toolFileName):
""" Call an external application called toolFileName.
Note that .exe extension may be omitted for windows applications.
    Include any arguments as additional items of the toolFileName list.
Example:
returnString = te.runTool (['myplugin', 'arg1', 'arg2'])
If the external tool writes to stdout, this will be captured and returned.
    :param toolFileName: list containing the tool name followed by its arguments
    :returns: string returned by the external tool, if any.
"""
import subprocess
try:
p = os.path.dirname(sys.executable)
root, waste = os.path.split(p)
toolFileName[0] = root + '\\telluriumTools\\' + toolFileName[0] + '\\' + toolFileName[0] + '.exe'
return subprocess.check_output(toolFileName)
except subprocess.CalledProcessError as e:
raise Exception('Tool failed to run correctly or could not be found')
# ---------------------------------------------------------------------
# ODE extraction methods
# ---------------------------------------------------------------------
def getODEsFromSBMLFile (fileName):
    """ Given an SBML file name, this function returns the model
    as a string of rules and ODEs
    >>> te.getODEsFromSBMLFile ('mymodel.xml')
    """
    sbmlStr = readFromFile (fileName)
extractor = ODEExtractor (sbmlStr)
return extractor.toString()
def getODEsFromSBMLString (sbmlStr):
    """ Given an SBML string this function returns the model
as a string of rules and ODEs
>>> te.getODEsFromSBMLString (sbmlStr)
"""
extractor = ODEExtractor (sbmlStr)
return extractor.toString()
def getODEsFromModel (roadrunnerModel):
"""Given a roadrunner instance this function returns
a string of rules and ODEs
>>> r = te.loada ('S1 -> S2; k1*S1; k1=1')
>>> te.getODEsFromModel (r)
"""
from roadrunner import RoadRunner
if type (roadrunnerModel) == RoadRunner:
extractor = ODEExtractor (roadrunnerModel.getSBML())
else:
raise RuntimeError('The argument to getODEsFromModel should be a roadrunner variable')
return extractor.toString()
class Accumulator:
def __init__(self, species_id):
self.reaction_map = {}
self.reactions = []
self.species_id = species_id
def addReaction(self, reaction, stoich):
rid = reaction.getId()
if rid in self.reaction_map:
self.reaction_map[rid]['stoich'] += stoich
else:
self.reaction_map[rid] = {
'reaction': reaction,
'id': rid,
'formula': self.getFormula(reaction),
'stoich': stoich,
}
self.reactions.append(rid)
def getFormula(self, reaction):
return reaction.getKineticLaw().getFormula()
def toString(self, use_ids=False):
lhs = 'd{}/dt'.format(self.species_id)
terms = []
for rid in self.reactions:
if abs(self.reaction_map[rid]['stoich']) == 1:
stoich = ''
else:
stoich = str(abs(self.reaction_map[rid]['stoich'])) + '*'
if len(terms) > 0:
if self.reaction_map[rid]['stoich'] < 0:
op = ' - '
else:
op = ' + '
else:
if self.reaction_map[rid]['stoich'] < 0:
op = '-'
else:
op = ''
if use_ids:
expr = 'v' + self.reaction_map[rid]['id']
else:
expr = self.reaction_map[rid]['formula']
terms.append(op + stoich + expr)
rhs = ''.join(terms)
return lhs + ' = ' + rhs
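# Illustrative Accumulator output (assuming a reaction J1: S1 -> S2 with kinetic law
# k1*S1): the accumulator for S1 renders "dS1/dt = -k1*S1" and the one for S2 renders
# "dS2/dt = k1*S1"; with use_ids=True these become "dS1/dt = -vJ1" and "dS2/dt = vJ1".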
class ODEExtractor:
def __init__(self, sbmlStr):
try:
try:
import libsbml
except ImportError:
import tesbml as libsbml
except ImportError:
raise Exception("Cannot import libsbml. Try tellurium.installPackage('libsbml')")
self.doc = libsbml.readSBMLFromString (sbmlStr)
self.model = self.doc.getModel()
self.species_map = {}
self.species_symbol_map = {}
self.use_species_names = False
self.use_ids = True
from collections import defaultdict
self.accumulators = {}
self.accumulator_list = []
def reactionParticipant(participant, stoich):
stoich_sign = 1
if stoich < 0:
stoich_sign = -1
if participant.isSetStoichiometry():
stoich = participant.getStoichiometry()
elif participant.isSetStoichiometryMath():
raise RuntimeError('Stoichiometry math not supported')
self.accumulators[participant.getSpecies()].addReaction(r, stoich_sign*stoich)
newReactant = lambda p: reactionParticipant(p, -1)
newProduct = lambda p: reactionParticipant(p, 1)
for s in (self.model.getSpecies(i) for i in range(self.model.getNumSpecies())):
self.species_map[s.getId()] = s
if s.isSetName() and self.use_species_names:
self.species_symbol_map[s.getId()] = s.getName()
else:
self.species_symbol_map[s.getId()] = s.getId()
a = Accumulator(s.getId())
self.accumulators[s.getId()] = a
self.accumulator_list.append(a)
for r in (self.model.getReaction(i) for i in range(self.model.getNumReactions())):
for reactant in (r.getReactant(i) for i in range(r.getNumReactants())):
newReactant(reactant)
for product in (r.getProduct(i) for i in range(r.getNumProducts())):
newProduct(product)
def getRules (self):
r = ''
for i in range (self.model.getNumRules()):
if self.model.getRule(i).getType() == 0:
r += 'd' + self.model.getRule(i).id + '/dt = ' + self.model.getRule(i).formula + '\n'
if self.model.getRule(i).getType() == 1:
r += self.model.getRule(i).id + ' = ' + self.model.getRule(i).formula + '\n'
return r
def getKineticLaws (self):
r = ''
if self.use_ids:
r += '\n'
for rx in (self.model.getReaction(i) for i in range(self.model.getNumReactions())):
r += 'v' + rx.getId() + ' = ' + rx.getKineticLaw().getFormula().replace(" ", "") + '\n'
return r
def getRateOfChange (self, index):
return self.accumulator_list[index].toString(use_ids=self.use_ids) + '\n'
def getRatesOfChange (self):
r = '\n'
for a in self.accumulator_list:
r += a.toString(use_ids=self.use_ids) + '\n'
return r
def toString(self):
r = self.getRules()
r = r + self.getKineticLaws() + '\n'
for index in range (self.model.getNumSpecies()):
if not self.model.getSpecies (index).getBoundaryCondition():
r = r + self.getRateOfChange (index)
return r
| sys-bio/tellurium | tellurium/utils/misc.py | Python | apache-2.0 | 9,311 |
import os
import requests
import tarfile
from zentral.utils.local_dir import get_and_create_local_dir
GITHUB_BEATS_RELEASES_URL = "https://api.github.com/repos/elastic/beats/releases"
FILEBEAT_RELEASE_NAME_TMPL = "filebeat-{version}-{platform}-x86_64"
FILEBEAT_DOWNLOAD_URL_TMPL = "https://artifacts.elastic.co/downloads/beats/filebeat/{release_name}.tar.gz"
def get_filebeat_versions():
resp = requests.get(GITHUB_BEATS_RELEASES_URL)
versions = []
for release in resp.json():
versions.append(release["tag_name"].strip("v"))
versions.sort(key=lambda v: [int(i) for i in v.split("-")[0].split(".")], reverse=True)
return versions
def get_filebeat_binary(version, platform="darwin"):
    version = version.strip(".\\/")
    platform = platform.strip(".\\/")
# release dir
releases_root = get_and_create_local_dir("filebeat", "releases")
release_name = FILEBEAT_RELEASE_NAME_TMPL.format(version=version, platform=platform)
# binary exists?
release_dir = os.path.join(releases_root, release_name)
filebeat_binary_path = os.path.join(release_dir, "filebeat")
if not os.path.exists(filebeat_binary_path):
# download release
download_url = FILEBEAT_DOWNLOAD_URL_TMPL.format(release_name=release_name)
resp = requests.get(download_url, stream=True)
# extract release
tf = tarfile.open(fileobj=resp.raw, mode="r:gz")
tf.extractall(path=releases_root)
return filebeat_binary_path
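# Example usage (hypothetical version string; a sketch, not called by this module):
#   versions = get_filebeat_versions()
#   binary_path = get_filebeat_binary(versions[0], platform="darwin")
# The first call downloads and extracts the release tarball into the local cache;
# later calls return the cached binary path directly.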
| zentralopensource/zentral | zentral/contrib/filebeat/filebeat_releases.py | Python | apache-2.0 | 1,484 |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A git command for managing a local cache of git repositories."""
from __future__ import print_function
import contextlib
import errno
import logging
import optparse
import os
import re
import subprocess
import sys
import tempfile
import threading
import time
try:
import urlparse
except ImportError: # For Py3 compatibility
import urllib.parse as urlparse
from download_from_google_storage import Gsutil
import gclient_utils
import lockfile
import metrics
import subcommand
# Analogous to gc.autopacklimit git config.
GC_AUTOPACKLIMIT = 50
GIT_CACHE_CORRUPT_MESSAGE = 'WARNING: The Git cache is corrupt.'
# gsutil creates many processes and threads. Creating too many gsutil cp
# processes may result in running out of resources, and may perform worse due to
# context switching. This limits how many concurrent gsutil cp processes
# git_cache runs.
GSUTIL_CP_SEMAPHORE = threading.Semaphore(2)
try:
# pylint: disable=undefined-variable
WinErr = WindowsError
except NameError:
class WinErr(Exception):
pass
class ClobberNeeded(Exception):
pass
def exponential_backoff_retry(fn, excs=(Exception,), name=None, count=10,
sleep_time=0.25, printerr=None):
"""Executes |fn| up to |count| times, backing off exponentially.
Args:
fn (callable): The function to execute. If this raises a handled
exception, the function will retry with exponential backoff.
excs (tuple): A tuple of Exception types to handle. If one of these is
raised by |fn|, a retry will be attempted. If |fn| raises an Exception
that is not in this list, it will immediately pass through. If |excs|
is empty, the Exception base class will be used.
name (str): Optional operation name to print in the retry string.
count (int): The number of times to try before allowing the exception to
pass through.
sleep_time (float): The initial number of seconds to sleep in between
retries. This will be doubled each retry.
printerr (callable): Function that will be called with the error string upon
failures. If None, |logging.warning| will be used.
Returns: The return value of the successful fn.
"""
printerr = printerr or logging.warning
for i in range(count):
try:
return fn()
except excs as e:
if (i+1) >= count:
raise
printerr('Retrying %s in %.2f second(s) (%d / %d attempts): %s' % (
(name or 'operation'), sleep_time, (i+1), count, e))
time.sleep(sleep_time)
sleep_time *= 2
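# Minimal usage sketch (hypothetical paths), retrying a rename on OSError with
# exponential backoff:
#
#   exponential_backoff_retry(
#       lambda: os.rename('/tmp/cache.tmp', '/tmp/cache'),
#       excs=(OSError,),
#       name='rename /tmp/cache.tmp => /tmp/cache')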
class Mirror(object):
git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
gsutil_exe = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'gsutil.py')
cachepath_lock = threading.Lock()
UNSET_CACHEPATH = object()
# Used for tests
_GIT_CONFIG_LOCATION = []
@staticmethod
def parse_fetch_spec(spec):
"""Parses and canonicalizes a fetch spec.
Returns (fetchspec, value_regex), where value_regex can be used
with 'git config --replace-all'.
"""
parts = spec.split(':', 1)
src = parts[0].lstrip('+').rstrip('/')
if not src.startswith('refs/'):
src = 'refs/heads/%s' % src
dest = parts[1].rstrip('/') if len(parts) > 1 else src
regex = r'\+%s:.*' % src.replace('*', r'\*')
return ('+%s:%s' % (src, dest), regex)
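  # Worked example (illustrative): parse_fetch_spec('main') returns
  # ('+refs/heads/main:refs/heads/main', r'\+refs/heads/main:.*'), while
  # parse_fetch_spec('refs/branch-heads/*') returns
  # ('+refs/branch-heads/*:refs/branch-heads/*', r'\+refs/branch-heads/\*:.*').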
def __init__(self, url, refs=None, commits=None, print_func=None):
self.url = url
self.fetch_specs = {self.parse_fetch_spec(ref) for ref in (refs or [])}
self.fetch_commits = set(commits or [])
self.basedir = self.UrlToCacheDir(url)
self.mirror_path = os.path.join(self.GetCachePath(), self.basedir)
if print_func:
self.print = self.print_without_file
self.print_func = print_func
else:
self.print = print
def print_without_file(self, message, **_kwargs):
self.print_func(message)
@contextlib.contextmanager
def print_duration_of(self, what):
start = time.time()
try:
yield
finally:
self.print('%s took %.1f minutes' % (what, (time.time() - start) / 60.0))
@property
def bootstrap_bucket(self):
b = os.getenv('OVERRIDE_BOOTSTRAP_BUCKET')
if b:
return b
u = urlparse.urlparse(self.url)
if u.netloc == 'chromium.googlesource.com':
return 'chromium-git-cache'
# Not recognized.
return None
@property
def _gs_path(self):
return 'gs://%s/v2/%s' % (self.bootstrap_bucket, self.basedir)
@classmethod
def FromPath(cls, path):
return cls(cls.CacheDirToUrl(path))
@staticmethod
def UrlToCacheDir(url):
"""Convert a git url to a normalized form for the cache dir path."""
if os.path.isdir(url):
# Ignore the drive letter in Windows
url = os.path.splitdrive(url)[1]
return url.replace('-', '--').replace(os.sep, '-')
parsed = urlparse.urlparse(url)
norm_url = parsed.netloc + parsed.path
if norm_url.endswith('.git'):
norm_url = norm_url[:-len('.git')]
# Use the same dir for authenticated URLs and unauthenticated URLs.
norm_url = norm_url.replace('googlesource.com/a/', 'googlesource.com/')
return norm_url.replace('-', '--').replace('/', '-').lower()
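  # Worked example (illustrative):
  #   UrlToCacheDir('https://chromium.googlesource.com/a/chromium/src.git')
  # returns 'chromium.googlesource.com-chromium-src'; authenticated '/a/' URLs and
  # plain URLs therefore share one cache directory.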
@staticmethod
def CacheDirToUrl(path):
"""Convert a cache dir path to its corresponding url."""
netpath = re.sub(r'\b-\b', '/', os.path.basename(path)).replace('--', '-')
return 'https://%s' % netpath
@classmethod
def SetCachePath(cls, cachepath):
with cls.cachepath_lock:
setattr(cls, 'cachepath', cachepath)
@classmethod
def GetCachePath(cls):
with cls.cachepath_lock:
if not hasattr(cls, 'cachepath'):
try:
cachepath = subprocess.check_output(
[cls.git_exe, 'config'] +
cls._GIT_CONFIG_LOCATION +
['cache.cachepath']).decode('utf-8', 'ignore').strip()
except subprocess.CalledProcessError:
cachepath = os.environ.get('GIT_CACHE_PATH', cls.UNSET_CACHEPATH)
setattr(cls, 'cachepath', cachepath)
ret = getattr(cls, 'cachepath')
if ret is cls.UNSET_CACHEPATH:
raise RuntimeError('No cache.cachepath git configuration or '
'$GIT_CACHE_PATH is set.')
return ret
@staticmethod
def _GetMostRecentCacheDirectory(ls_out_set):
ready_file_pattern = re.compile(r'.*/(\d+).ready$')
ready_dirs = []
for name in ls_out_set:
m = ready_file_pattern.match(name)
# Given <path>/<number>.ready,
# we are interested in <path>/<number> directory
if m and (name[:-len('.ready')] + '/') in ls_out_set:
ready_dirs.append((int(m.group(1)), name[:-len('.ready')]))
if not ready_dirs:
return None
return max(ready_dirs)[1]
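  # Illustrative behaviour (hypothetical bucket listing): given the set
  #   {'gs://b/v2/repo/100/', 'gs://b/v2/repo/100.ready',
  #    'gs://b/v2/repo/123/', 'gs://b/v2/repo/123.ready'}
  # this returns 'gs://b/v2/repo/123', the newest directory with a .ready marker.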
def Rename(self, src, dst):
# This is somehow racy on Windows.
# Catching OSError because WindowsError isn't portable and
# pylint complains.
exponential_backoff_retry(
lambda: os.rename(src, dst),
excs=(OSError,),
name='rename [%s] => [%s]' % (src, dst),
printerr=self.print)
def RunGit(self, cmd, print_stdout=True, **kwargs):
"""Run git in a subprocess."""
cwd = kwargs.setdefault('cwd', self.mirror_path)
kwargs.setdefault('print_stdout', False)
if print_stdout:
kwargs.setdefault('filter_fn', self.print)
env = kwargs.get('env') or kwargs.setdefault('env', os.environ.copy())
env.setdefault('GIT_ASKPASS', 'true')
env.setdefault('SSH_ASKPASS', 'true')
self.print('running "git %s" in "%s"' % (' '.join(cmd), cwd))
gclient_utils.CheckCallAndFilter([self.git_exe] + cmd, **kwargs)
def config(self, cwd=None, reset_fetch_config=False):
if cwd is None:
cwd = self.mirror_path
if reset_fetch_config:
try:
self.RunGit(['config', '--unset-all', 'remote.origin.fetch'], cwd=cwd)
except subprocess.CalledProcessError as e:
# If exit code was 5, it means we attempted to unset a config that
# didn't exist. Ignore it.
if e.returncode != 5:
raise
# Don't run git-gc in a daemon. Bad things can happen if it gets killed.
try:
self.RunGit(['config', 'gc.autodetach', '0'], cwd=cwd)
except subprocess.CalledProcessError:
# Hard error, need to clobber.
raise ClobberNeeded()
# Don't combine pack files into one big pack file. It's really slow for
# repositories, and there's no way to track progress and make sure it's
# not stuck.
if self.supported_project():
self.RunGit(['config', 'gc.autopacklimit', '0'], cwd=cwd)
# Allocate more RAM for cache-ing delta chains, for better performance
# of "Resolving deltas".
self.RunGit(['config', 'core.deltaBaseCacheLimit',
gclient_utils.DefaultDeltaBaseCacheLimit()], cwd=cwd)
self.RunGit(['config', 'remote.origin.url', self.url], cwd=cwd)
self.RunGit(['config', '--replace-all', 'remote.origin.fetch',
'+refs/heads/*:refs/heads/*', r'\+refs/heads/\*:.*'], cwd=cwd)
for spec, value_regex in self.fetch_specs:
self.RunGit(
['config', '--replace-all', 'remote.origin.fetch', spec, value_regex],
cwd=cwd)
def bootstrap_repo(self, directory):
"""Bootstrap the repo from Google Storage if possible.
More apt-ly named bootstrap_repo_from_cloud_if_possible_else_do_nothing().
"""
if not self.bootstrap_bucket:
return False
gsutil = Gsutil(self.gsutil_exe, boto_path=None)
# Get the most recent version of the directory.
# This is determined from the most recent version of a .ready file.
# The .ready file is only uploaded when an entire directory has been
# uploaded to GS.
_, ls_out, ls_err = gsutil.check_call('ls', self._gs_path)
ls_out_set = set(ls_out.strip().splitlines())
latest_dir = self._GetMostRecentCacheDirectory(ls_out_set)
if not latest_dir:
self.print('No bootstrap file for %s found in %s, stderr:\n %s' %
(self.mirror_path, self.bootstrap_bucket,
' '.join((ls_err or '').splitlines(True))))
return False
try:
# create new temporary directory locally
tempdir = tempfile.mkdtemp(prefix='_cache_tmp', dir=self.GetCachePath())
self.RunGit(['init', '--bare'], cwd=tempdir)
self.print('Downloading files in %s/* into %s.' %
(latest_dir, tempdir))
with self.print_duration_of('download'):
with GSUTIL_CP_SEMAPHORE:
code = gsutil.call('-m', 'cp', '-r', latest_dir + "/*",
tempdir)
if code:
return False
# Set HEAD to main.
self.RunGit(['symbolic-ref', 'HEAD', 'refs/heads/main'], cwd=tempdir)
# A quick validation that all references are valid.
self.RunGit(['for-each-ref'], print_stdout=False, cwd=tempdir)
except Exception as e:
self.print('Encountered error: %s' % str(e), file=sys.stderr)
gclient_utils.rmtree(tempdir)
return False
# delete the old directory
if os.path.exists(directory):
gclient_utils.rmtree(directory)
self.Rename(tempdir, directory)
return True
def contains_revision(self, revision):
if not self.exists():
return False
if sys.platform.startswith('win'):
# Windows .bat scripts use ^ as escape sequence, which means we have to
# escape it with itself for every .bat invocation.
needle = '%s^^^^{commit}' % revision
else:
needle = '%s^{commit}' % revision
try:
# cat-file exits with 0 on success, that is git object of given hash was
# found.
self.RunGit(['cat-file', '-e', needle])
return True
except subprocess.CalledProcessError:
self.print('Commit with hash "%s" not found' % revision, file=sys.stderr)
return False
def exists(self):
return os.path.isfile(os.path.join(self.mirror_path, 'config'))
def supported_project(self):
"""Returns true if this repo is known to have a bootstrap zip file."""
u = urlparse.urlparse(self.url)
return u.netloc in [
'chromium.googlesource.com',
'chrome-internal.googlesource.com']
def _preserve_fetchspec(self):
"""Read and preserve remote.origin.fetch from an existing mirror.
This modifies self.fetch_specs.
"""
if not self.exists():
return
try:
config_fetchspecs = subprocess.check_output(
[self.git_exe, 'config', '--get-all', 'remote.origin.fetch'],
cwd=self.mirror_path).decode('utf-8', 'ignore')
for fetchspec in config_fetchspecs.splitlines():
self.fetch_specs.add(self.parse_fetch_spec(fetchspec))
except subprocess.CalledProcessError:
logging.warning(
'Tried and failed to preserve remote.origin.fetch from the '
'existing cache directory. You may need to manually edit '
'%s and "git cache fetch" again.' %
os.path.join(self.mirror_path, 'config'))
def _ensure_bootstrapped(
self, depth, bootstrap, reset_fetch_config, force=False):
pack_dir = os.path.join(self.mirror_path, 'objects', 'pack')
pack_files = []
if os.path.isdir(pack_dir):
pack_files = [f for f in os.listdir(pack_dir) if f.endswith('.pack')]
self.print('%s has %d .pack files, re-bootstrapping if >%d or ==0' %
(self.mirror_path, len(pack_files), GC_AUTOPACKLIMIT))
should_bootstrap = (force or
not self.exists() or
len(pack_files) > GC_AUTOPACKLIMIT or
len(pack_files) == 0)
if not should_bootstrap:
if depth and os.path.exists(os.path.join(self.mirror_path, 'shallow')):
logging.warning(
'Shallow fetch requested, but repo cache already exists.')
      # Old bootstraps may have an old default HEAD, so this ensures main is always
# used.
self.RunGit(['symbolic-ref', 'HEAD', 'refs/heads/main'],
cwd=self.mirror_path)
return
if not self.exists():
if os.path.exists(self.mirror_path):
# If the mirror path exists but self.exists() returns false, we're
# in an unexpected state. Nuke the previous mirror directory and
# start fresh.
gclient_utils.rmtree(self.mirror_path)
os.mkdir(self.mirror_path)
elif not reset_fetch_config:
# Re-bootstrapping an existing mirror; preserve existing fetch spec.
self._preserve_fetchspec()
bootstrapped = (not depth and bootstrap and
self.bootstrap_repo(self.mirror_path))
if not bootstrapped:
if not self.exists() or not self.supported_project():
# Bootstrap failed due to:
# 1. No previous cache.
# 2. Project doesn't have a bootstrap folder.
# Start with a bare git dir.
self.RunGit(['init', '--bare'], cwd=self.mirror_path)
# Set HEAD to main. -b is introduced in 2.28 and may not be available
# everywhere.
self.RunGit(['symbolic-ref', 'HEAD', 'refs/heads/main'],
cwd=self.mirror_path)
else:
# Bootstrap failed, previous cache exists; warn and continue.
logging.warning(
'Git cache has a lot of pack files (%d). Tried to re-bootstrap '
'but failed. Continuing with non-optimized repository.' %
len(pack_files))
def _fetch(self,
rundir,
verbose,
depth,
no_fetch_tags,
reset_fetch_config,
prune=True):
self.config(rundir, reset_fetch_config)
fetch_cmd = ['fetch']
if verbose:
fetch_cmd.extend(['-v', '--progress'])
if depth:
fetch_cmd.extend(['--depth', str(depth)])
if no_fetch_tags:
fetch_cmd.append('--no-tags')
if prune:
fetch_cmd.append('--prune')
fetch_cmd.append('origin')
fetch_specs = subprocess.check_output(
[self.git_exe, 'config', '--get-all', 'remote.origin.fetch'],
cwd=rundir).decode('utf-8', 'ignore').strip().splitlines()
for spec in fetch_specs:
try:
self.print('Fetching %s' % spec)
with self.print_duration_of('fetch %s' % spec):
self.RunGit(fetch_cmd + [spec], cwd=rundir, retry=True)
except subprocess.CalledProcessError:
if spec == '+refs/heads/*:refs/heads/*':
raise ClobberNeeded() # Corrupted cache.
logging.warning('Fetch of %s failed' % spec)
for commit in self.fetch_commits:
self.print('Fetching %s' % commit)
try:
with self.print_duration_of('fetch %s' % commit):
self.RunGit(['fetch', 'origin', commit], cwd=rundir, retry=True)
except subprocess.CalledProcessError:
logging.warning('Fetch of %s failed' % commit)
def populate(self,
depth=None,
no_fetch_tags=False,
shallow=False,
bootstrap=False,
verbose=False,
lock_timeout=0,
reset_fetch_config=False):
assert self.GetCachePath()
if shallow and not depth:
depth = 10000
gclient_utils.safe_makedirs(self.GetCachePath())
with lockfile.lock(self.mirror_path, lock_timeout):
try:
self._ensure_bootstrapped(depth, bootstrap, reset_fetch_config)
self._fetch(self.mirror_path, verbose, depth, no_fetch_tags,
reset_fetch_config)
except ClobberNeeded:
# This is a major failure, we need to clean and force a bootstrap.
gclient_utils.rmtree(self.mirror_path)
self.print(GIT_CACHE_CORRUPT_MESSAGE)
self._ensure_bootstrapped(depth,
bootstrap,
reset_fetch_config,
force=True)
self._fetch(self.mirror_path, verbose, depth, no_fetch_tags,
reset_fetch_config)
def update_bootstrap(self, prune=False, gc_aggressive=False, branch='main'):
# The folder is <git number>
gen_number = subprocess.check_output(
[self.git_exe, 'number', branch],
cwd=self.mirror_path).decode('utf-8', 'ignore').strip()
gsutil = Gsutil(path=self.gsutil_exe, boto_path=None)
src_name = self.mirror_path
dest_prefix = '%s/%s' % (self._gs_path, gen_number)
# ls_out lists contents in the format: gs://blah/blah/123...
_, ls_out, _ = gsutil.check_call('ls', self._gs_path)
# Check to see if folder already exists in gs
ls_out_set = set(ls_out.strip().splitlines())
if (dest_prefix + '/' in ls_out_set and
dest_prefix + '.ready' in ls_out_set):
print('Cache %s already exists.' % dest_prefix)
return
# Reduce the number of individual files to download & write on disk.
self.RunGit(['pack-refs', '--all'])
# Run Garbage Collect to compress packfile.
gc_args = ['gc', '--prune=all']
if gc_aggressive:
# The default "gc --aggressive" is often too aggressive for some machines,
# since it attempts to create as many threads as there are CPU cores,
# while not limiting per-thread memory usage, which puts too much pressure
# on RAM on high-core machines, causing them to thrash. Using lower-level
# commands gives more control over those settings.
# This might not be strictly necessary, but it's fast and is normally run
# by 'gc --aggressive', so it shouldn't hurt.
self.RunGit(['reflog', 'expire', '--all'])
# These are the default repack settings for 'gc --aggressive'.
gc_args = ['repack', '-d', '-l', '-f', '--depth=50', '--window=250', '-A',
'--unpack-unreachable=all']
# A 1G memory limit seems to provide comparable pack results as the
# default, even for our largest repos, while preventing runaway memory (at
# least on current Chromium builders which have about 4G RAM per core).
gc_args.append('--window-memory=1g')
# NOTE: It might also be possible to avoid thrashing with a larger window
# (e.g. "--window-memory=2g") by limiting the number of threads created
# (e.g. "--threads=[cores/2]"). Some limited testing didn't show much
# difference in outcomes on our current repos, but it might be worth
# trying if the repos grow much larger and the packs don't seem to be
# getting compressed enough.
self.RunGit(gc_args)
gsutil.call('-m', 'cp', '-r', src_name, dest_prefix)
# Create .ready file and upload
_, ready_file_name = tempfile.mkstemp(suffix='.ready')
try:
gsutil.call('cp', ready_file_name, '%s.ready' % (dest_prefix))
finally:
os.remove(ready_file_name)
# remove all other directory/.ready files in the same gs_path
# except for the directory/.ready file previously created
# which can be used for bootstrapping while the current one is
# being uploaded
if not prune:
return
prev_dest_prefix = self._GetMostRecentCacheDirectory(ls_out_set)
if not prev_dest_prefix:
return
for path in ls_out_set:
if path in (prev_dest_prefix + '/', prev_dest_prefix + '.ready'):
continue
if path.endswith('.ready'):
gsutil.call('rm', path)
continue
gsutil.call('-m', 'rm', '-r', path)
@staticmethod
def DeleteTmpPackFiles(path):
pack_dir = os.path.join(path, 'objects', 'pack')
if not os.path.isdir(pack_dir):
return
pack_files = [f for f in os.listdir(pack_dir) if
f.startswith('.tmp-') or f.startswith('tmp_pack_')]
for f in pack_files:
f = os.path.join(pack_dir, f)
try:
os.remove(f)
logging.warning('Deleted stale temporary pack file %s' % f)
except OSError:
logging.warning('Unable to delete temporary pack file %s' % f)
@subcommand.usage('[url of repo to check for caching]')
@metrics.collector.collect_metrics('git cache exists')
def CMDexists(parser, args):
"""Check to see if there already is a cache of the given repo."""
_, args = parser.parse_args(args)
if not len(args) == 1:
parser.error('git cache exists only takes exactly one repo url.')
url = args[0]
mirror = Mirror(url)
if mirror.exists():
print(mirror.mirror_path)
return 0
return 1
@subcommand.usage('[url of repo to create a bootstrap zip file]')
@metrics.collector.collect_metrics('git cache update-bootstrap')
def CMDupdate_bootstrap(parser, args):
"""Create and uploads a bootstrap tarball."""
  # Let's just assert we can't do this on Windows.
if sys.platform.startswith('win'):
print('Sorry, update bootstrap will not work on Windows.', file=sys.stderr)
return 1
parser.add_option('--skip-populate', action='store_true',
help='Skips "populate" step if mirror already exists.')
parser.add_option('--gc-aggressive', action='store_true',
help='Run aggressive repacking of the repo.')
parser.add_option('--prune', action='store_true',
help='Prune all other cached bundles of the same repo.')
parser.add_option('--branch', default='main',
help='Branch to use for bootstrap. (Default \'main\')')
populate_args = args[:]
options, args = parser.parse_args(args)
url = args[0]
mirror = Mirror(url)
if not options.skip_populate or not mirror.exists():
CMDpopulate(parser, populate_args)
else:
print('Skipped populate step.')
# Get the repo directory.
_, args2 = parser.parse_args(args)
url = args2[0]
mirror = Mirror(url)
mirror.update_bootstrap(options.prune, options.gc_aggressive, options.branch)
return 0
@subcommand.usage('[url of repo to add to or update in cache]')
@metrics.collector.collect_metrics('git cache populate')
def CMDpopulate(parser, args):
"""Ensure that the cache has all up-to-date objects for the given repo."""
parser.add_option('--depth', type='int',
help='Only cache DEPTH commits of history')
parser.add_option(
'--no-fetch-tags',
action='store_true',
help=('Don\'t fetch tags from the server. This can speed up '
'fetch considerably when there are many tags.'))
parser.add_option('--shallow', '-s', action='store_true',
help='Only cache 10000 commits of history')
parser.add_option('--ref', action='append',
help='Specify additional refs to be fetched')
parser.add_option('--commit', action='append',
help='Specify additional commits to be fetched')
parser.add_option('--no_bootstrap', '--no-bootstrap',
action='store_true',
help='Don\'t bootstrap from Google Storage')
parser.add_option('--ignore_locks',
'--ignore-locks',
action='store_true',
help='NOOP. This flag will be removed in the future.')
parser.add_option('--break-locks',
action='store_true',
help='Break any existing lock instead of just ignoring it')
parser.add_option('--reset-fetch-config', action='store_true', default=False,
help='Reset the fetch config before populating the cache.')
options, args = parser.parse_args(args)
if not len(args) == 1:
parser.error('git cache populate only takes exactly one repo url.')
if options.ignore_locks:
print('ignore_locks is no longer used. Please remove its usage.')
if options.break_locks:
print('break_locks is no longer used. Please remove its usage.')
url = args[0]
mirror = Mirror(url, refs=options.ref, commits=options.commit)
kwargs = {
'no_fetch_tags': options.no_fetch_tags,
'verbose': options.verbose,
'shallow': options.shallow,
'bootstrap': not options.no_bootstrap,
'lock_timeout': options.timeout,
'reset_fetch_config': options.reset_fetch_config,
}
if options.depth:
kwargs['depth'] = options.depth
mirror.populate(**kwargs)
@subcommand.usage('Fetch new commits into cache and current checkout')
@metrics.collector.collect_metrics('git cache fetch')
def CMDfetch(parser, args):
"""Update mirror, and fetch in cwd."""
parser.add_option('--all', action='store_true', help='Fetch all remotes')
parser.add_option('--no_bootstrap', '--no-bootstrap',
action='store_true',
help='Don\'t (re)bootstrap from Google Storage')
parser.add_option(
'--no-fetch-tags',
action='store_true',
help=('Don\'t fetch tags from the server. This can speed up '
'fetch considerably when there are many tags.'))
options, args = parser.parse_args(args)
# Figure out which remotes to fetch. This mimics the behavior of regular
# 'git fetch'. Note that in the case of "stacked" or "pipelined" branches,
# this will NOT try to traverse up the branching structure to find the
# ultimate remote to update.
remotes = []
if options.all:
assert not args, 'fatal: fetch --all does not take a repository argument'
remotes = subprocess.check_output([Mirror.git_exe, 'remote'])
remotes = remotes.decode('utf-8', 'ignore').splitlines()
elif args:
remotes = args
else:
current_branch = subprocess.check_output(
[Mirror.git_exe, 'rev-parse', '--abbrev-ref', 'HEAD'])
current_branch = current_branch.decode('utf-8', 'ignore').strip()
if current_branch != 'HEAD':
upstream = subprocess.check_output(
[Mirror.git_exe, 'config', 'branch.%s.remote' % current_branch])
upstream = upstream.decode('utf-8', 'ignore').strip()
if upstream and upstream != '.':
remotes = [upstream]
if not remotes:
remotes = ['origin']
cachepath = Mirror.GetCachePath()
git_dir = os.path.abspath(subprocess.check_output(
[Mirror.git_exe, 'rev-parse', '--git-dir']).decode('utf-8', 'ignore'))
git_dir = os.path.abspath(git_dir)
if git_dir.startswith(cachepath):
mirror = Mirror.FromPath(git_dir)
mirror.populate(
bootstrap=not options.no_bootstrap,
no_fetch_tags=options.no_fetch_tags,
lock_timeout=options.timeout)
return 0
for remote in remotes:
remote_url = subprocess.check_output(
[Mirror.git_exe, 'config', 'remote.%s.url' % remote])
remote_url = remote_url.decode('utf-8', 'ignore').strip()
if remote_url.startswith(cachepath):
mirror = Mirror.FromPath(remote_url)
mirror.print = lambda *args: None
print('Updating git cache...')
mirror.populate(
bootstrap=not options.no_bootstrap,
no_fetch_tags=options.no_fetch_tags,
lock_timeout=options.timeout)
subprocess.check_call([Mirror.git_exe, 'fetch', remote])
return 0
@subcommand.usage('do not use - it is a noop.')
@metrics.collector.collect_metrics('git cache unlock')
def CMDunlock(parser, args):
"""This command does nothing."""
print('This command does nothing and will be removed in the future.')
class OptionParser(optparse.OptionParser):
"""Wrapper class for OptionParser to handle global options."""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, prog='git cache', **kwargs)
self.add_option('-c', '--cache-dir',
help=(
'Path to the directory containing the caches. Normally '
'deduced from git config cache.cachepath or '
'$GIT_CACHE_PATH.'))
self.add_option('-v', '--verbose', action='count', default=1,
help='Increase verbosity (can be passed multiple times)')
self.add_option('-q', '--quiet', action='store_true',
help='Suppress all extraneous output')
self.add_option('--timeout', type='int', default=0,
help='Timeout for acquiring cache lock, in seconds')
def parse_args(self, args=None, values=None):
# Create an optparse.Values object that will store only the actual passed
# options, without the defaults.
actual_options = optparse.Values()
_, args = optparse.OptionParser.parse_args(self, args, actual_options)
# Create an optparse.Values object with the default options.
options = optparse.Values(self.get_default_values().__dict__)
# Update it with the options passed by the user.
options._update_careful(actual_options.__dict__)
# Store the options passed by the user in an _actual_options attribute.
# We store only the keys, and not the values, since the values can contain
# arbitrary information, which might be PII.
metrics.collector.add('arguments', list(actual_options.__dict__.keys()))
if options.quiet:
options.verbose = 0
levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(options.verbose, len(levels) - 1)])
try:
global_cache_dir = Mirror.GetCachePath()
except RuntimeError:
global_cache_dir = None
if options.cache_dir:
if global_cache_dir and (
os.path.abspath(options.cache_dir) !=
os.path.abspath(global_cache_dir)):
logging.warning('Overriding globally-configured cache directory.')
Mirror.SetCachePath(options.cache_dir)
return options, args
def main(argv):
dispatcher = subcommand.CommandDispatcher(__name__)
return dispatcher.execute(OptionParser(), argv)
if __name__ == '__main__':
try:
with metrics.collector.print_notice_and_exit():
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
| CoherentLabs/depot_tools | git_cache.py | Python | bsd-3-clause | 31,704 |
##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing the XML R package, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.rpackage import RPackage
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.modules import get_software_root
class EB_XML(RPackage):
"""Support for installing the XML R package."""
def install_R_package(self, cmd, inp=None):
"""Customized install procedure for XML R package, add zlib lib path to LIBS."""
libs = os.getenv('LIBS', '')
zlib = get_software_root('zlib')
if zlib:
env.setvar('LIBS', "%s -L%s" % (libs, os.path.join(zlib, 'lib')))
elif 'zlib' in build_option('filter_deps'):
self.log.info("zlib included in list of filtered dependencies, so no need to tweak $LIBS")
else:
raise EasyBuildError("zlib module not loaded (required)")
super(EB_XML, self).install_R_package(cmd, inp)
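# Example effect (hypothetical install prefix): with a zlib module rooted at
# /opt/software/zlib/1.2.11, $LIBS is extended to
# "<original LIBS> -L/opt/software/zlib/1.2.11/lib" before the R package install
# command runs.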
| akesandgren/easybuild-easyblocks | easybuild/easyblocks/x/xml.py | Python | gpl-2.0 | 2,135 |
# coding: utf-8
# Copyright 2017 video++ Project, SJTU MediaLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from vpp import log
from vpp.config import CONF
from vpp.queue import running_queue
from vpp.operator import BASE_WORK_DIR
from vpp.tracker.job_tracker import JobTracker
# Using __name__ as the logger name here would make all other loggers in this
# package log each record twice, so we append a suffix to it.
_logger_name = __name__ + '_any_string'
LOG = log.get_logger(_logger_name, CONF.jobtracker_log_file)
def run_tracker():
    """Periodically check the status of running jobs, and download and merge the
    results when a job is done.
"""
LOOP_INTERVAL = CONF.jobtracker.loop_interval
job_tracker = JobTracker(BASE_WORK_DIR)
iterations = 0
while True:
if iterations % 60 == 0:
LOG.info("tracker iterations: %d" % iterations)
iterations += 1
job_ids = running_queue.keys()
if len(job_ids) == 0:
time.sleep(1)
continue
for job_id in job_ids:
job_tracker.track_job(job_id)
time.sleep(LOOP_INTERVAL)
| ArthurChiao/videoplusplus | vpp/tracker/__init__.py | Python | apache-2.0 | 1,645 |
import re
from copy import copy
from datetime import datetime
from django.conf import settings
from django.contrib.sites.models import Site
from django.core import mail
from django.core.cache import cache
from django.test.client import RequestFactory
import bleach
import mock
import waffle
from nose.tools import eq_
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import add_permission, user
from kitsune.wiki.models import Revision, Document
from kitsune.wiki.tasks import (
send_reviewed_notification, rebuild_kb, schedule_rebuild_kb,
_rebuild_kb_chunk, render_document_cascade)
from kitsune.wiki.tests import TestCaseBase, revision
from kitsune.wiki.tests.test_parser import doc_rev_parser
REVIEWED_EMAIL_CONTENT = """Your revision has been reviewed.
%s has approved your revision to the document %s.
Message from the reviewer:
%s
To view the history of this document, click the following link, or paste \
it into your browser's location bar:
https://testserver/en-US/kb/%s/history
"""
class RebuildTestCase(TestCase):
rf = RequestFactory()
ALWAYS_EAGER = settings.CELERY_ALWAYS_EAGER
def setUp(self):
# create some random revisions.
revision(save=True)
revision(is_approved=True, save=True)
revision(is_approved=True, save=True)
revision(is_approved=True, save=True)
revision(is_approved=True, save=True)
# TODO: fix this crap
self.old_settings = copy(settings._wrapped.__dict__)
settings.CELERY_ALWAYS_EAGER = True
def tearDown(self):
cache.delete(settings.WIKI_REBUILD_TOKEN)
settings._wrapped.__dict__ = self.old_settings
settings.CELERY_ALWAYS_EAGER = self.ALWAYS_EAGER
@mock.patch.object(rebuild_kb, 'delay')
@mock.patch.object(waffle, 'switch_is_active')
def test_eager_queue(self, switch_is_active, delay):
switch_is_active.return_value = True
schedule_rebuild_kb()
assert not cache.get(settings.WIKI_REBUILD_TOKEN)
assert not delay.called
@mock.patch.object(rebuild_kb, 'delay')
@mock.patch.object(waffle, 'switch_is_active')
def test_task_queue(self, switch_is_active, delay):
switch_is_active.return_value = True
settings.CELERY_ALWAYS_EAGER = False
schedule_rebuild_kb()
assert cache.get(settings.WIKI_REBUILD_TOKEN)
assert delay.called
@mock.patch.object(rebuild_kb, 'delay')
@mock.patch.object(waffle, 'switch_is_active')
def test_already_queued(self, switch_is_active, delay):
switch_is_active.return_value = True
cache.set(settings.WIKI_REBUILD_TOKEN, True)
schedule_rebuild_kb()
assert cache.get(settings.WIKI_REBUILD_TOKEN)
assert not delay.called
@mock.patch.object(rebuild_kb, 'delay')
@mock.patch.object(cache, 'get')
@mock.patch.object(waffle, 'switch_is_active')
def test_dont_queue(self, switch_is_active, get, delay):
switch_is_active.return_value = False
schedule_rebuild_kb()
assert not get.called
assert not delay.called
@mock.patch.object(_rebuild_kb_chunk, 'apply_async')
def test_rebuild_chunk(self, apply_async):
cache.set(settings.WIKI_REBUILD_TOKEN, True)
rebuild_kb()
assert not cache.get(settings.WIKI_REBUILD_TOKEN)
assert 'args' in apply_async.call_args[1]
# There should be 4 documents with an approved revision
eq_(4, len(apply_async.call_args[1]['args'][0]))
class ReviewMailTestCase(TestCaseBase):
"""Test that the review mail gets sent."""
def setUp(self):
self.user = user(save=True)
add_permission(self.user, Revision, 'review_revision')
def _approve_and_send(self, revision, reviewer, message):
revision.reviewer = reviewer
revision.reviewed = datetime.now()
revision.is_approved = True
revision.save()
send_reviewed_notification(revision, revision.document, message)
@mock.patch.object(Site.objects, 'get_current')
def test_reviewed_notification(self, get_current):
get_current.return_value.domain = 'testserver'
rev = revision()
doc = rev.document
msg = 'great work!'
self._approve_and_send(rev, self.user, msg)
# Two emails will be sent, one each for the reviewer and the reviewed.
eq_(2, len(mail.outbox))
eq_('Your revision has been approved: %s' % doc.title,
mail.outbox[0].subject)
eq_([rev.creator.email], mail.outbox[0].to)
eq_(REVIEWED_EMAIL_CONTENT % (
self.user.username, doc.title, msg, doc.slug), mail.outbox[0].body)
@mock.patch.object(Site.objects, 'get_current')
def test_reviewed_by_creator_no_notification(self, get_current):
get_current.return_value.domain = 'testserver'
rev = revision()
msg = "great work!"
self._approve_and_send(rev, rev.creator, msg)
# Verify no email was sent
eq_(0, len(mail.outbox))
@mock.patch.object(Site.objects, 'get_current')
def test_unicode_notifications(self, get_current):
get_current.return_value.domain = 'testserver'
rev = revision()
doc = rev.document
doc.title = u'Foo \xe8 incode'
msg = 'foo'
self._approve_and_send(rev, self.user, msg)
# Two emails will be sent, one each for the reviewer and the reviewed.
eq_(2, len(mail.outbox))
eq_('Your revision has been approved: %s' % doc.title,
mail.outbox[0].subject)
@mock.patch.object(Site.objects, 'get_current')
def test_escaping(self, get_current):
get_current.return_value.domain = 'testserver'
rev = revision()
doc = rev.document
doc.title = '"All about quotes"'
msg = 'foo & "bar"'
self._approve_and_send(rev, self.user, msg)
# Two emails will be sent, one each for the reviewer and the reviewed.
eq_(2, len(mail.outbox))
eq_('Your revision has been approved: %s' % doc.title,
mail.outbox[0].subject)
assert '"' not in mail.outbox[0].body
assert '"All about quotes"' in mail.outbox[0].body
assert 'foo & "bar"' in mail.outbox[0].body
class TestDocumentRenderCascades(TestCaseBase):
def _clean(self, d):
"""Get a clean and normalized version of a documents html."""
html = Document.objects.get(slug=d.slug).html
return re.sub(r'\s+', ' ', bleach.clean(html, strip=True)).strip()
def test_cascade(self):
d1, _, _ = doc_rev_parser('one ', title='Template:D1')
d2, _, _ = doc_rev_parser('[[T:D1]] two', title='Template:D2')
d3, _, _ = doc_rev_parser('[[T:D1]] [[T:D2]] three', title='D3')
eq_(self._clean(d3), u'one one two three')
revision(document=d1, content='ONE', is_approved=True, save=True)
render_document_cascade(d1)
eq_(self._clean(d1), u'ONE')
eq_(self._clean(d2), u'ONE two')
eq_(self._clean(d3), u'ONE ONE two three')
| orvi2014/kitsune | kitsune/wiki/tests/test_tasks.py | Python | bsd-3-clause | 7,075 |
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import math
from marshmallow import fields, pre_load
from indico.modules.search.base import SearchTarget
from indico.modules.search.result_schemas import (AggregationSchema, AttachmentResultSchema, BucketSchema,
EventNoteResultSchema, EventResultSchema, ResultItemSchema,
ResultSchema)
from indico_citadel.util import format_aggregations
class CitadelEventResultSchema(EventResultSchema):
@pre_load
def _translate_keys(self, data, **kwargs):
data = data.copy()
data['event_type'] = data.pop('type_format')
return data
class CitadelAttachmentResultSchema(AttachmentResultSchema):
@pre_load
def _translate_keys(self, data, **kwargs):
data = data.copy()
data['user'] = data.pop('persons', None)
data['attachment_type'] = data.pop('type_format')
return data
class CitadelEventNoteResultSchema(EventNoteResultSchema):
@pre_load
def _translate_keys(self, data, **kwargs):
data = data.copy()
data['user'] = data.pop('persons', None)
return data
class _CitadelBucketSchema(BucketSchema):
@pre_load
def _make_filter(self, data, **kwargs):
data = data.copy()
range_from = data.pop('from_as_string', None)
range_to = data.pop('to_as_string', None)
if range_from or range_to:
data['filter'] = f'[{range_from or "*"} TO {range_to or "*"}]'
else:
data['filter'] = data['key']
return data
class CitadelAggregationSchema(AggregationSchema):
buckets = fields.List(fields.Nested(_CitadelBucketSchema))
class CitadelResultItemSchema(ResultItemSchema):
type_schemas = {
**ResultItemSchema.type_schemas,
SearchTarget.event.name: CitadelEventResultSchema,
SearchTarget.attachment.name: CitadelAttachmentResultSchema,
SearchTarget.event_note.name: CitadelEventNoteResultSchema,
}
class CitadelResultSchema(ResultSchema):
results = fields.List(fields.Nested(CitadelResultItemSchema), required=True)
aggregations = fields.Dict(fields.String(), fields.Nested(CitadelAggregationSchema))
@pre_load
def _extract_data(self, data, **kwargs):
from .search import filters
total = data['hits']['total']
pages = min(1000, math.ceil(total / self.context['results_per_page']))
        # The citadel service stores every indexable/queryable attribute in a `_data` field.
        # This extraction ensures Indico stays abstracted from that complexity.
results = [
{
**item['metadata'].pop('_data'),
**item['metadata'],
'highlight': {
key.removeprefix('_data.'): value for key, value in item['highlight'].items()
}
}
for item in data['hits']['hits']
]
return {
'aggregations': format_aggregations(data['aggregations'], filters),
'results': results,
'total': total,
'pages': pages,
}
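# --- Hedged sketch (not part of the original module) -------------------------
# A dependency-free illustration of the range-to-filter rule implemented by
# _CitadelBucketSchema._make_filter above. The bucket payloads below are
# invented examples; only the field names mirror the schema.
def _example_make_filter(bucket):
    bucket = bucket.copy()
    range_from = bucket.pop('from_as_string', None)
    range_to = bucket.pop('to_as_string', None)
    if range_from or range_to:
        bucket['filter'] = f'[{range_from or "*"} TO {range_to or "*"}]'
    else:
        bucket['filter'] = bucket['key']
    return bucket
if __name__ == '__main__':
    # A range bucket becomes a Lucene-style range filter...
    print(_example_make_filter({'key': '2021', 'from_as_string': '2021-01-01'}))
    # ...while a plain keyword bucket simply reuses its key.
    print(_example_make_filter({'key': 'conference'}))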
| indico/indico-plugins | citadel/indico_citadel/result_schemas.py | Python | mit | 3,366 |
import pandas as pd
from featuretools.computational_backends import calculate_feature_matrix
from featuretools.entityset import EntitySet
from featuretools.synthesis.deep_feature_synthesis import DeepFeatureSynthesis
from featuretools.utils import entry_point
@entry_point('featuretools_dfs')
def dfs(entities=None,
relationships=None,
entityset=None,
target_entity=None,
cutoff_time=None,
instance_ids=None,
agg_primitives=None,
trans_primitives=None,
groupby_trans_primitives=None,
allowed_paths=None,
max_depth=2,
ignore_entities=None,
ignore_variables=None,
primitive_options=None,
seed_features=None,
drop_contains=None,
drop_exact=None,
where_primitives=None,
max_features=-1,
cutoff_time_in_index=False,
save_progress=None,
features_only=False,
training_window=None,
approximate=None,
chunk_size=None,
n_jobs=1,
dask_kwargs=None,
verbose=False,
return_variable_types=None,
progress_callback=None):
'''Calculates a feature matrix and features given a dictionary of entities
and a list of relationships.
Args:
entities (dict[str -> tuple(pd.DataFrame, str, str)]): Dictionary of
entities. Entries take the format
{entity id -> (dataframe, id column, (time_column))}.
relationships (list[(str, str, str, str)]): List of relationships
between entities. List items are a tuple with the format
(parent entity id, parent variable, child entity id, child variable).
entityset (EntitySet): An already initialized entityset. Required if
entities and relationships are not defined.
target_entity (str): Entity id of entity on which to make predictions.
cutoff_time (pd.DataFrame or Datetime): Specifies times at which to
calculate each instance. The resulting feature matrix will use data
up to and including the cutoff_time. Can either be a DataFrame with
'instance_id' and 'time' columns, a DataFrame with the name of the
index variable in the target entity and a time column, a
list of values, or a single
value to calculate for all instances. If the dataframe has more than
two columns, any additional columns will be added to the resulting
feature matrix.
instance_ids (list): List of instances on which to calculate features. Only
used if cutoff_time is a single datetime.
agg_primitives (list[str or AggregationPrimitive], optional): List of Aggregation
Feature types to apply.
Default: ["sum", "std", "max", "skew", "min", "mean", "count", "percent_true", "num_unique", "mode"]
trans_primitives (list[str or TransformPrimitive], optional):
List of Transform Feature functions to apply.
Default: ["day", "year", "month", "weekday", "haversine", "num_words", "num_characters"]
groupby_trans_primitives (list[str or :class:`.primitives.TransformPrimitive`], optional):
list of Transform primitives to make GroupByTransformFeatures with
allowed_paths (list[list[str]]): Allowed entity paths on which to make
features.
max_depth (int) : Maximum allowed depth of features.
ignore_entities (list[str], optional): List of entities to
blacklist when creating features.
ignore_variables (dict[str -> list[str]], optional): List of specific
variables within each entity to blacklist when creating features.
primitive_options (list[dict[str or tuple[str] -> dict] or dict[str or tuple[str] -> dict, optional]):
Specify options for a single primitive or a group of primitives.
Lists of option dicts are used to specify options per input for primitives
with multiple inputs. Each option ``dict`` can have the following keys:
``"include_entities"``
List of entities to be included when creating features for
the primitive(s). All other entities will be ignored
(list[str]).
``"ignore_entities"``
List of entities to be blacklisted when creating features
for the primitive(s) (list[str]).
``"include_variables"``
List of specific variables within each entity to include when
                creating features for the primitive(s). All other variables
in a given entity will be ignored (dict[str -> list[str]]).
``"ignore_variables"``
                List of specific variables within each entity to blacklist
when creating features for the primitive(s) (dict[str ->
list[str]]).
``"include_groupby_entities"``
List of Entities to be included when finding groupbys. All
other entities will be ignored (list[str]).
``"ignore_groupby_entities"``
List of entities to blacklist when finding groupbys
(list[str]).
``"include_groupby_variables"``
List of specific variables within each entity to include as
groupbys, if applicable. All other variables in each
entity will be ignored (dict[str -> list[str]]).
``"ignore_groupby_variables"``
List of specific variables within each entity to blacklist
as groupbys (dict[str -> list[str]]).
seed_features (list[:class:`.FeatureBase`]): List of manually defined
features to use.
drop_contains (list[str], optional): Drop features
that contains these strings in name.
drop_exact (list[str], optional): Drop features that
exactly match these strings in name.
where_primitives (list[str or PrimitiveBase], optional):
List of Primitives names (or types) to apply with where clauses.
Default:
["count"]
max_features (int, optional) : Cap the number of generated features to
this number. If -1, no limit.
features_only (bool, optional): If True, returns the list of
features without calculating the feature matrix.
cutoff_time_in_index (bool): If True, return a DataFrame with a MultiIndex
where the second index is the cutoff time (first is instance id).
DataFrame will be sorted by (time, instance_id).
training_window (Timedelta or str, optional):
Window defining how much time before the cutoff time data
can be used when calculating features. If ``None`` , all data
before cutoff time is used. Defaults to ``None``. Month and year
units are not relative when Pandas Timedeltas are used. Relative
units should be passed as a Featuretools Timedelta or a string.
approximate (Timedelta): Bucket size to group instances with similar
cutoff times by for features with costly calculations. For example,
if bucket is 24 hours, all instances with cutoff times on the same
day will use the same calculation for expensive features.
save_progress (str, optional): Path to save intermediate computational results.
n_jobs (int, optional): number of parallel processes to use when
calculating feature matrix
chunk_size (int or float or None or "cutoff time", optional): Number
            of rows of the output feature matrix to calculate at a time. If passed an
integer greater than 0, will try to use that many rows per chunk.
If passed a float value between 0 and 1 sets the chunk size to that
percentage of all instances. If passed the string "cutoff time",
rows are split per cutoff time.
dask_kwargs (dict, optional): Dictionary of keyword arguments to be
passed when creating the dask client and scheduler. Even if n_jobs
is not set, using `dask_kwargs` will enable multiprocessing.
Main parameters:
cluster (str or dask.distributed.LocalCluster):
cluster or address of cluster to send tasks to. If unspecified,
a cluster will be created.
diagnostics port (int):
port number to use for web dashboard. If left unspecified, web
interface will not be enabled.
Valid keyword arguments for LocalCluster will also be accepted.
return_variable_types (list[Variable] or str, optional): Types of
variables to return. If None, default to
Numeric, Discrete, and Boolean. If given as
the string 'all', use all available variable types.
progress_callback (callable): function to be called with incremental progress updates.
Has the following parameters:
update: percentage change (float between 0 and 100) in progress since last call
progress_percent: percentage (float between 0 and 100) of total computation completed
time_elapsed: total time in seconds that has elapsed since start of call
Examples:
.. code-block:: python
from featuretools.primitives import Mean
# cutoff times per instance
entities = {
"sessions" : (session_df, "id"),
"transactions" : (transactions_df, "id", "transaction_time")
}
relationships = [("sessions", "id", "transactions", "session_id")]
feature_matrix, features = dfs(entities=entities,
relationships=relationships,
target_entity="transactions",
cutoff_time=cutoff_times)
feature_matrix
features = dfs(entities=entities,
relationships=relationships,
target_entity="transactions",
features_only=True)
'''
if not isinstance(entityset, EntitySet):
entityset = EntitySet("dfs", entities, relationships)
dfs_object = DeepFeatureSynthesis(target_entity, entityset,
agg_primitives=agg_primitives,
trans_primitives=trans_primitives,
groupby_trans_primitives=groupby_trans_primitives,
max_depth=max_depth,
where_primitives=where_primitives,
allowed_paths=allowed_paths,
drop_exact=drop_exact,
drop_contains=drop_contains,
ignore_entities=ignore_entities,
ignore_variables=ignore_variables,
primitive_options=primitive_options,
max_features=max_features,
seed_features=seed_features)
features = dfs_object.build_features(
verbose=verbose, return_variable_types=return_variable_types)
if features_only:
return features
if isinstance(cutoff_time, pd.DataFrame):
feature_matrix = calculate_feature_matrix(features,
entityset=entityset,
cutoff_time=cutoff_time,
training_window=training_window,
approximate=approximate,
cutoff_time_in_index=cutoff_time_in_index,
save_progress=save_progress,
chunk_size=chunk_size,
n_jobs=n_jobs,
dask_kwargs=dask_kwargs,
verbose=verbose,
progress_callback=progress_callback)
else:
feature_matrix = calculate_feature_matrix(features,
entityset=entityset,
cutoff_time=cutoff_time,
instance_ids=instance_ids,
training_window=training_window,
approximate=approximate,
cutoff_time_in_index=cutoff_time_in_index,
save_progress=save_progress,
chunk_size=chunk_size,
n_jobs=n_jobs,
dask_kwargs=dask_kwargs,
verbose=verbose,
progress_callback=progress_callback)
return feature_matrix, features
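# --- Hedged usage sketch (not part of the original module) -------------------
# Mirrors the docstring example above with tiny, invented DataFrames; defined
# as a function so importing this module stays side-effect free.
def _example_dfs_run():
    sessions_df = pd.DataFrame({"id": [1, 2]})
    transactions_df = pd.DataFrame({
        "id": [10, 11, 12],
        "session_id": [1, 1, 2],
        "amount": [5.0, 2.5, 7.0],
        "transaction_time": pd.to_datetime(["2020-01-01", "2020-01-02", "2020-01-03"]),
    })
    entities = {
        "sessions": (sessions_df, "id"),
        "transactions": (transactions_df, "id", "transaction_time"),
    }
    relationships = [("sessions", "id", "transactions", "session_id")]
    # Aggregate transaction columns up to the session level.
    return dfs(entities=entities,
               relationships=relationships,
               target_entity="sessions",
               agg_primitives=["sum", "count"],
               trans_primitives=[],
               max_depth=1)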
| Featuretools/featuretools | featuretools/synthesis/dfs.py | Python | bsd-3-clause | 13,558 |
# -*- coding: utf-8 -*-
#
# Telemetric documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 4 14:21:46 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
extensions = ['sphinx.ext.autodoc', # 'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
#'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Telemetric'
copyright = u'2017, Samuel Abels'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Telemetric'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'telemetric.tex', u'Telemetric Documentation',
u'Samuel Abels', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'telemetric', u'Telemetric Documentation',
[u'Samuel Abels'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'telemetric', u'Telemetric Documentation',
u'Samuel Abels', 'Telemetric', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Telemetric'
epub_author = u'Samuel Abels'
epub_publisher = u'Samuel Abels'
epub_copyright = u'2017, Samuel Abels'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
def skip(app, what, name, obj, skip, options):
if name == "__init__":
return False
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
| knipknap/telemetric | docs/conf.py | Python | mit | 9,676 |
""" Commerce API Service. """
from django.conf import settings
from edx_rest_api_client.client import EdxRestApiClient
from eventtracking import tracker
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
ECOMMERCE_DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
def create_tracking_context(user):
""" Assembles attributes from user and request objects to be sent along
in ecommerce api calls for tracking purposes. """
context_tracker = tracker.get_tracker().resolve_context()
return {
'lms_user_id': user.id,
'lms_client_id': context_tracker.get('client_id'),
'lms_ip': context_tracker.get('ip'),
}
def is_commerce_service_configured():
"""
Return a Boolean indicating whether or not configuration is present to use
the external commerce service.
"""
ecommerce_api_url = configuration_helpers.get_value("ECOMMERCE_API_URL", settings.ECOMMERCE_API_URL)
ecommerce_api_signing_key = configuration_helpers.get_value(
"ECOMMERCE_API_SIGNING_KEY", settings.ECOMMERCE_API_SIGNING_KEY,
)
return bool(ecommerce_api_url and ecommerce_api_signing_key)
def ecommerce_api_client(user):
""" Returns an E-Commerce API client setup with authentication for the specified user. """
jwt_auth = configuration_helpers.get_value("JWT_AUTH", settings.JWT_AUTH)
return EdxRestApiClient(
configuration_helpers.get_value("ECOMMERCE_API_URL", settings.ECOMMERCE_API_URL),
configuration_helpers.get_value("ECOMMERCE_API_SIGNING_KEY", settings.ECOMMERCE_API_SIGNING_KEY),
user.username,
user.profile.name if hasattr(user, 'profile') else None,
user.email,
tracking_context=create_tracking_context(user),
issuer=jwt_auth['JWT_ISSUER'],
expires_in=jwt_auth['JWT_EXPIRATION']
)
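# --- Hedged usage sketch (not part of the original module) -------------------
# Typical call site inside LMS code; `request.user` is a Django user and the
# `.baskets.get()` endpoint is only a hypothetical illustration of the
# slumber-style client returned by ecommerce_api_client().
#
#   if is_commerce_service_configured():
#       api = ecommerce_api_client(request.user)
#       response = api.baskets.get()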
| Learningtribes/edx-platform | openedx/core/djangoapps/commerce/utils.py | Python | agpl-3.0 | 1,844 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateBatch
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataproc
# [START dataproc_v1_generated_BatchController_CreateBatch_sync]
from google.cloud import dataproc_v1
def sample_create_batch():
# Create a client
client = dataproc_v1.BatchControllerClient()
# Initialize request argument(s)
batch = dataproc_v1.Batch()
batch.pyspark_batch.main_python_file_uri = "main_python_file_uri_value"
request = dataproc_v1.CreateBatchRequest(
parent="parent_value",
batch=batch,
)
# Make the request
operation = client.create_batch(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END dataproc_v1_generated_BatchController_CreateBatch_sync]
| googleapis/python-dataproc | samples/generated_samples/dataproc_v1_generated_batch_controller_create_batch_sync.py | Python | apache-2.0 | 1,661 |
#!/usr/bin/env python
from __future__ import print_function
import emitter
class XMLEmitter(emitter.Emitter):
def preface(self):
return """<?xml version="1.0" encoding="utf-8"?>
<!-- Dynamically generated list of documented logfile messages (generated by parse.py) -->
<loggermessagefile>
"""
def postface(self):
return "</loggermessagefile>"
def start(self):
self.fh = open("LogMessages.xml", mode='w')
print(self.preface(), file=self.fh)
def emit(self, doccos):
self.start()
for docco in doccos:
print(' <logformat name="%s">' % docco.name, file=self.fh)
if docco.url is not None:
print(' <url>%s</url>' % docco.url, file=self.fh)
if docco.description is not None:
print(' <description>%s</description>' %
docco.description, file=self.fh)
print(' <fields>', file=self.fh)
for f in docco.fields_order:
print(' <field name="%s">' % f, file=self.fh)
if "description" in docco.fields[f]:
print(' <description>%s</description>' %
docco.fields[f]["description"], file=self.fh)
if "bits" in docco.fields[f]:
print(' <bits>%s</bits>' %
docco.fields[f]["bits"], file=self.fh)
print(' </field>', file=self.fh)
print(' </fields>', file=self.fh)
print(' </logformat>', file=self.fh)
print("", file=self.fh)
self.stop()
def stop(self):
print(self.postface(), file=self.fh)
self.fh.close()
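# --- Hedged usage sketch (not part of the original file) ---------------------
# Illustrates the attributes emit() expects on each "docco" object; the values
# below are invented for demonstration only.
#
#   from types import SimpleNamespace
#   docco = SimpleNamespace(
#       name="ATT",
#       url=None,
#       description="Canonical vehicle attitude",
#       fields_order=["Roll", "Pitch"],
#       fields={"Roll": {"description": "achieved roll"},
#               "Pitch": {"description": "achieved pitch"}})
#   XMLEmitter().emit([docco])   # writes LogMessages.xml in the working dir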
| squilter/ardupilot | Tools/autotest/logger_metadata/emit_xml.py | Python | gpl-3.0 | 1,771 |
import datetime
from decimal import Decimal
import types
import six
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True).
"""
return isinstance(obj, (
six.integer_types +
(types.NoneType,
datetime.datetime, datetime.date, datetime.time,
float, Decimal))
)
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first, saves 30-40% when s is an instance of
# six.text_type. This function gets called often in that setting.
if isinstance(s, six.text_type):
return s
if strings_only and is_protected_type(s):
return s
try:
if not isinstance(s, six.string_types):
if hasattr(s, '__unicode__'):
s = s.__unicode__()
else:
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s,
# encoding, errors), so that if s is a SafeBytes, it ends up being
# a SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise UnicodeDecodeError(*e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
return s
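# --- Hedged usage sketch (not part of the original module) -------------------
# force_unicode() decodes byte strings and, with strings_only=True, returns
# "protected" scalar types untouched. Invented inputs for illustration:
#
#   force_unicode(b'caf\xc3\xa9')            # -> u'café'
#   force_unicode(42, strings_only=True)     # -> 42 (unchanged)
#   force_unicode(None, strings_only=True)   # -> None (unchanged)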
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/external/org_mozilla_bleach/bleach/encoding.py | Python | bsd-2-clause | 2,277 |
import sys
import time
import asyncio
import getpass
import threading
from enum import Enum
try:
from socket import socketpair
except ImportError:
from asyncio.windows_utils import socketpair
from ..common import config
from ..common import utils
class ClientState(Enum):
Initial = 1
Handshake = 2
Authentication = 3
Command = 4
class ClientProtocol(asyncio.Protocol):
def __init__(self, client, username, passwd, *args, **kwargs):
super().__init__(*args, **kwargs)
self.username = username
self.passwd = passwd # should probably send over SSL if I keep going after the challenge
self.client = client
self.state = ClientState.Initial
self.client_actions = {
ClientState.Handshake: self.receive_handshake,
ClientState.Authentication: self.receive_auth,
ClientState.Command: self.receive_command_result,
}
self.interpreter = Interpreter(self)
def connection_made(self, transport):
self.transport = transport
self.send_handshake()
def data_received(self, data):
result = self.client_actions.get(self.state, self.unimplemented_action)(data)
def connection_lost(self, exc):
print('connection lost')
self.interpreter_done = True
self.interpreter.stop()
self.client.loop.stop()
def send_handshake(self):
self.transport.write(config.version.encode())
self.state = ClientState.Handshake
def receive_handshake(self, data):
msg = data.decode().strip()
if msg != config.version:
            print('Invalid handshake, quitting...')
            self.transport.close()
            return
s = '{}:{}'.format(self.username, self.passwd)
self.transport.write(s.encode())
self.state = ClientState.Authentication
def receive_auth(self, data):
msg = data.decode().strip()
if msg == 'NO':
print('Failed to authenticate, quitting...')
self.transport.close()
elif msg == 'OK':
print('\n\tLogged in successfully! Loading game...')
self.state = ClientState.Command
self.interpreter.start()
def receive_command_result(self, data):
msg = data.decode().strip()
print('\n\t'+msg+'\n\t=> ', end='')
def unimplemented_action(self, data):
print('client tried to execute an unimplemented state "{}"'.format(self.state))
class Interpreter(object):
def __init__(self, client_protocol):
self.protocol = client_protocol
self.commands = {
'echo': lambda *args: self.send('echo ' + (' '.join(args))),
'serverquit': lambda: self.send('quit'),
'quit': lambda: self.send('disconnect'),
'say': self.say,
'n': lambda: self.send('go north'),
's': lambda: self.send('go south'),
'e': lambda: self.send('go east'),
'w': lambda: self.send('go west'),
'help': self.help,
}
def start(self):
self.running = True
self.thread = threading.Thread(target=self.run)
self.thread.start()
def stop(self, *args):
print('stopping interpreter and closing transport')
self.running = False
self.protocol.transport.close()
def send(self, msg):
self.protocol.transport.write(msg.encode())
def run(self):
self.send('look')
while self.running:
cmdinp = input('\t=> ')
if not cmdinp:
continue
cmdstr, *args = [c.strip() for c in cmdinp.split(' ') if c]
self.commands.get(cmdstr, lambda *args: self.server_command(cmdstr, *args))(*args)
def server_command(self, cmd, *args):
servercmd = ' '.join((cmd, ' '.join(args)))
self.send(servercmd)
def say(self, *args):
msg = 'say ' + ' '.join(args)
self.send(msg)
def help(self, msg=None, *args):
commands = '\n\t\t' + '\n\t\t'.join(self.commands.keys())
if msg:
print('\t'+msg)
print(commands)
print('\n\t\tThere may also be area-specific commands that you can find!\n')
class Client(object):
def run(self):
print('''
_____ __ ___ _
\_ \ / / _____ _____ /\/\ _ _ /\/\ /\ /\ / \__| | ___ _ __
/ /\/ / / / _ \ \ / / _ \ / \| | | | / \/ / \ \/ /\ / _` |/ _ \ '__|
/\/ /_ / /__| (_) \ V / __/ / /\/\ \ |_| | / /\/\ \ \_/ / /_// (_| | __/ |
\____/ \____/\___/ \_/ \___| \/ \/\__, | \/ \/\___/___,' \__,_|\___|_|
|___/
Welcome! Please enter your username and password to begin.
''')
self.main_loop(*self.get_userinfo())
def get_userinfo(self):
return input('\tusername > '), getpass.getpass('\tpassword > ')
def main_loop(self, username, passwd):
self.loop = asyncio.get_event_loop()
coro = self.loop.create_connection(lambda: ClientProtocol(self, username, passwd), config.host, config.port)
self.loop.run_until_complete(coro)
self.loop.run_forever()
self.loop.close()
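# --- Hedged usage sketch (not part of the original module) -------------------
# Intended entry point, inferred from the class above:
#
#   client = Client()
#   client.run()   # prompts for credentials, then connects to
#                  # config.host:config.port and starts the Interpreter thread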
| mavroskardia/ilovemymudder | mudder/src/client/client.py | Python | mit | 5,259 |
from PyQt4 import QtCore, QtGui
from m2 import *
class GSequEditor(QtGui.QDialog):
def __init__(self):
QtGui.QDialog.__init__(self)
self.setupUi(self)
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(540, 373)
self.textEdit = QtGui.QTextEdit(Dialog)
self.textEdit.setGeometry(QtCore.QRect(10, 10, 511, 251))
self.textEdit.setObjectName("textEdit")
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setGeometry(QtCore.QRect(310, 320, 206, 34))
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), self.finalizeMe)
self.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), self.reject)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Global Sequence Editor", None, QtGui.QApplication.UnicodeUTF8))
self.textEdit.setHtml(QtGui.QApplication.translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
def setModel(self,m2,skin):
self.m2 = m2
self.skin = skin
s = ""
for i in m2.gSequ:
s += str(i.Timestamp)+"\n"
self.textEdit.setPlainText(s)
def finalizeMe(self):
self.saveMe()
self.accept()
def saveMe(self):
t= self.textEdit.toPlainText().split("\n")
c = 0
gsequ = []
for i in t:
if (i!=""):
g = GlobalSequence()
g.Timestamp = int(i)
gsequ.append(g)
c += 1
self.m2.hdr.global_sequences.count = c
self.m2.gSequ = gsequ
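# --- Hedged usage sketch (not part of the original module) -------------------
# How the dialog is typically driven from the surrounding editor; `m2` and
# `skin` are assumed to come from the caller's loaded model.
#
#   dlg = GSequEditor()
#   dlg.setModel(m2, skin)
#   if dlg.exec_():   # finalizeMe() writes the edited timestamps back into m2
#       pass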
| GanjaNoel/pym2 | pymodeleditor/gsequedit.py | Python | lgpl-3.0 | 2,116 |
from Tkinter import *
from PIL import ImageTk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import sys
import time
import serial
import matplotlib.pyplot as plt
__author__ = 'tjd08a'
# Using user given port, open a serial connection
port = None
testing = False
for arg in sys.argv:
port = arg
ser = None
if not testing:
ser = serial.Serial(port, baudrate=57600, timeout=10)
data_points = 0 # Data points collected from serial connection
path_img = "Images/" # Path to find pictures for GUI
is_started = False # True if GUI will start receiving data
command_mode = False # True if user can input commands in the GUI
clear_to_send = False # True if the GUI can send a message
command_sent = False # True if a command has been inputted
send_start = True # True if the Wifly connection does an initial handshake
left_activated = False
mid_left_activated = False
right_activated = False
mid_right_activated = False
previous = 9
previous_angle = 0
# Configuration Variables
delay = 400 # Time for method to interrupt/how often to check for serial messages
reset_sleep = 5 # How long to make the wifly connection sleep at start up
y_upper_limit = 71 # Biggest data value from serial connection to expect
y_lower_limit = 5 # Lowest data value from serial connection to expect
angle = 20
up_number = 1 # Value to represent the Up Direction
back_number = 3 # Value to represent the Down/Back direction
right_number = 2 # Value to represent a Right Turn
left_number = 4 # Value to represent a Left Turn
idle_number = 5 # Value to represent an Idle Command
ack = 0 # Value to represent an acknowledgement
right_limit = 30
left_limit = 30
# End Configuration
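# --- Hedged protocol note (inferred from data_callback below; not part of the
# original script): each frame read from the rover is 5 raw bytes -- two
# right-side readings (val, val2), two left-side readings (val3, val4) and an
# angle value (val5) -- and every frame is acknowledged by writing chr(ack).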
# Initiate the graph for sensor data
fig = plt.Figure()
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax.set_ylim(y_lower_limit, y_upper_limit)
ax.set_xlim(0, 10)
ax.set_ylabel("Sensor Reading (cm)")
ax.set_title("Sensor Data (Left)")
ax2.set_ylim(y_lower_limit, y_upper_limit)
ax2.set_xlim(0, 10)
ax2.set_ylabel("Sensor Reading (cm)")
ax2.set_title("Sensor Data (Right)")
xdata, ydata = [], []
ydata1, ydata2, ydata3 = [], [], []
line, = ax.plot(xdata, ydata, 'r-')
line1, = ax.plot(xdata, ydata, 'g-')
line2, = ax2.plot(xdata, ydata, 'b-')
line3, = ax2.plot(xdata, ydata, 'm-')
if not testing:
# Reboot sequence below
ser.write('$$$')
time.sleep(1)
ser.write('reboot\r')
time.sleep(reset_sleep)
ser.flushInput()
ser.flushOutput()
# Add a message to the textbox in the GUI
# Used for debugging and errors
def update_log(txt):
global log_panel
log_panel.insert(END, txt)
log_panel.yview(END)
# Terminates the GUI/Program
def exit_call(event=None):
global root
root.destroy()
sys.exit(0)
# Triggers when up button is pressed
def up_press(event):
global up, hal, rover_forward, hal_img
hal.config(image=rover_forward) # Switches the up arrow image on the GUI
up.flash() # Makes the up button flash
up.invoke() # Invoke the callback for the up button
hal.after(250, lambda: hal.config(image=hal_img)) # Switch back to the arrow
# Triggers when back button is pressed
def back_press(event):
global back, rover_backward
hal.config(image=rover_backward)
back.flash()
back.invoke()
hal.after(250, lambda: hal.config(image=hal_img))
# Triggers when left button is pressed
def left_press(event):
global left
left.flash()
left.invoke()
# Triggers when right button is pressed
def right_press(event):
global right
right.flash()
right.invoke()
# Callback for up button
def up_call():
global command_mode, clear_to_send
global command_sent, up_number
# Switches GUI to command mode
if not command_mode:
command_mode = True
clear_to_send = True
# Sends a command if one hasn't been already been sent
if clear_to_send and command_mode:
val = chr(up_number)
ser.write(val)
command_sent = True
# Callback for back button
def back_call():
global command_mode, clear_to_send
global command_sent, back_number
if not command_mode:
command_mode = True
clear_to_send = True
if clear_to_send and command_mode:
val = chr(back_number)
ser.write(val)
command_sent = True
# Callback for left button
def left_call():
global command_mode, clear_to_send
global command_sent, left_number
if not command_mode:
command_mode = True
clear_to_send = True
if clear_to_send and command_mode:
val = chr(left_number)
ser.write(val)
command_sent = True
# Callback for right button
def right_call():
global command_mode
global clear_to_send
global command_sent
global right_number
if not command_mode:
command_mode = True
clear_to_send = True
if clear_to_send and command_mode:
val = chr(right_number)
ser.write(val)
command_sent = True
# Callback for hal or center button
def hal_call(event=None):
global command_mode
# Turns off command mode if on
# Does nothing if not in command mode
if command_mode:
command_mode = False
log_txt = "Exiting command mode, opening pod bay doors\n"
update_log(log_txt)
else:
log_txt = "I can't let you do that Dave, this button does nothing.\n"
update_log(log_txt)
# Callback for start button
def start_call(event=None):
global is_started, start
global log_panel, send_start
start.flash() # Flash start button
# Lets the GUI start receiving commands if it's not doing so
if not is_started:
is_started = True
log_txt = "Control Panel Will Begin Receiving Data\n"
update_log(log_txt)
# Sends a handshake if enabled
if send_start:
ser.write('RD')
# Callback method that occurs after user specified delay
# Handles data processing and non-GUI actions
def data_callback():
global fig, line, ax, ax2
global line1, line2, line3
global xdata, ydata
global ydata1, ydata2, ydata3
global is_started, command_mode
global data_points, clear_to_send
global command_sent, y_upper_limit, y_lower_limit
global idle_number, ack
global left_sensor, right_sensor
global right_limit, left_limit
global left_activated, right_activated
global angle, map_tiles, previous, previous_angle
# Only activates if the GUI has been started
if is_started:
# Checks for data from the serial connection
# True if there is data waiting
bytes_waiting = ser.inWaiting()
if bytes_waiting:
            # Reads five bytes from the connection
            # and converts each byte to its integer value
letters = ser.read(5)
val = ord(letters[0])
val2 = ord(letters[1])
val3 = ord(letters[2])
val4 = ord(letters[3])
val5 = ord(letters[4])
print val5
if command_mode:
# Receives a non-ack value in command mode
if val != 0:
error_text = "Error: Invalid command mode value received %d\n" % val
update_log(error_text)
else:
clear_to_send = True
else:
# Receives a value outside the expected range
                if(val > y_upper_limit or val < y_lower_limit):
error_text = "Error: Invalid value received %d\n" % val
update_log(error_text)
else:
if(val < right_limit or val2 < right_limit):
if(not right_activated):
right_sensor.config(bg="red")
right_activated = True
else:
if(right_activated):
right_sensor.config(bg="green")
right_activated = False
if(val3 < left_limit or val4 < left_limit):
if(not left_activated):
left_sensor.config(bg="red")
left_activated = True
else:
if(left_activated):
left_sensor.config(bg="green")
left_activated = False
# If valid data has been received, appends it to the graph
xdata.append(data_points)
ydata.append(val)
ydata1.append(val2)
ydata2.append(val3)
ydata3.append(val4)
line.set_xdata(xdata)
line.set_ydata(ydata)
line1.set_xdata(xdata)
line1.set_ydata(ydata1)
line2.set_xdata(xdata)
line2.set_ydata(ydata2)
line3.set_xdata(xdata)
line3.set_ydata(ydata3)
xmin, xmax = ax.get_xlim()
# Increases the size of the x-axis if enough data points
# have been received.
if data_points > xmax:
ax.set_xlim(xmin, xmax + 5)
ax2.set_xlim(xmin, xmax + 5)
# Updates graph
fig.canvas.draw()
if previous != None:
if(previous_angle == 20):
map_tiles[previous].config(bg="blue", fg="black", text="H")
else:
map_tiles[previous].config(bg="green", fg="green")
previous_angle = val5
if data_points < 8:
map_tiles[8 - data_points].config(bg="yellow", fg="yellow")
previous = 8 - data_points
elif data_points == 8:
map_tiles[10].config(bg="yellow", fg="yellow")
previous = 10
elif data_points < 17:
map_tiles[2 + data_points].config(bg="yellow", fg="yellow")
previous = 2 + data_points
elif data_points == 17:
map_tiles[29].config(bg="yellow", fg="yellow")
previous = 29
else:
map_tiles[46 - data_points].config(bg="yellow", fg="yellow")
previous = 46 - data_points
if(46 - data_points == 21):
is_started = False
if(previous_angle == 20):
map_tiles[21].config(bg="blue", fg="black", text="H")
data_points += 1
ser.write(chr(ack))
# If a command hasn't been sent by the GUI,
# sends an idle command. Used to keep the connection alive.
if command_mode:
if not command_sent:
idle = chr(idle_number)
ser.write(idle)
else:
command_sent = False
root.after(delay, data_callback)
# Builds/lays out GUI
root = Tk()
scroll_region = Canvas(root, width=1240, height=600)
scroll_region.grid(row=0, column=0, sticky = 'nsew')
container = Frame(scroll_region)
container.pack(fill="both", expand=True)
scroll_region.create_window((0,0), window=container, anchor="nw",
tags="self.frame")
intro_txt = "Rover 9000 Control Panel"
straight_arrow = ImageTk.PhotoImage(file=path_img + "straight.png")
right_arrow = ImageTk.PhotoImage(file=path_img + "right_turn.png")
left_arrow = ImageTk.PhotoImage(file=path_img + "left_turn.png")
back_arrow = ImageTk.PhotoImage(file=path_img + "back.png")
hal_img = ImageTk.PhotoImage(file=path_img + "hal.jpg")
rover_forward = ImageTk.PhotoImage(file=path_img + "rover_forward.png")
rover_backward = ImageTk.PhotoImage(file=path_img + "rover_backward.png")
rover = ImageTk.PhotoImage(file=path_img + "Rover2.png")
ramp = ImageTk.PhotoImage(file=path_img + "ramp2.jpg")
intro = Label(container, text=intro_txt, bg="azure", font=('Arial', 22))
intro.grid(row=0,column=0,columnspan=5)
hal = Button(container, image=hal_img, command=hal_call)
hal.grid(row=2,column=1)
up = Button(container, image=straight_arrow, command=up_call, bg="red")
up.grid(row=1,column=1)
back = Button(container, image=back_arrow, command=back_call, bg="red")
back.grid(row=3,column=1, pady=(0,20))
left = Button(container, image=left_arrow, command=left_call, bg="red")
left.grid(row=2,column=0, padx=(10,0))
right = Button(container, image=right_arrow, command=right_call, bg="red")
right.grid(row=2,column=2)
exit_button = Button(container, text="EXIT", bg="firebrick", fg="white", command=exit_call, font=('Arial',22))
exit_button.grid(row=3,column=2)
start = Button(container, text="START", bg="green", fg="white", font=('Arial',22), command=start_call)
start.grid(row=3,column=0)
log_panel = Text(container, height=2, width=40)
#log_panel.grid(row=4, column=0, columnspan=3, sticky='nsew')
# Attach scrollbar to log panel text box
scroll = Scrollbar(container, command = log_panel.yview)
#scroll.grid(row=4, column=3, padx=(0,2), sticky='nsew')
log_panel['yscrollcommand'] = scroll.set
window_scroll = Scrollbar(root, command = scroll_region.yview)
window_scroll.grid(row=0, column=1, sticky='nsew')
scroll_region['yscrollcommand'] = window_scroll.set
canvas = FigureCanvasTkAgg(fig, master=container)
canvas.show()
canvas.get_tk_widget().grid(column=4, row=1, rowspan=3)
proximity_sensor = Frame(container)
sensor_label = Label(container, text="Sensor Statuses", bg="azure", font=('Arial', 16))
#sensor_label.grid(row = 5, column = 1)
left_sensor = Label(proximity_sensor, text="L", bg="green", font=('Arial', 22))
left_sensor.grid(row=0, column=0)
right_sensor = Label(proximity_sensor, text="R", bg="green", font=('Arial', 22))
right_sensor.grid(row=0, column=2)
tilt_sensor = Label(container, text="Ramp", bg="green", font=('Arial', 22))
#tilt_sensor.grid(row=7, column=1, pady=(10,0))
rover_icon = Label(proximity_sensor, image=rover)
rover_icon.grid(row=0, column=1)
terrain_map = Frame(container, borderwidth=5)
terrain_map.grid(row=0, column=5, rowspan = 3, padx=(10,0))
proximity_sensor.grid(row=3, column=5, pady=(1, 0))
map_tiles = []
for i in xrange(46):
tile = Label(terrain_map, bg="green", fg="green", font=('Arial', 14),
bd=5, text="OK", relief=RAISED)
if(i<8):
tile["bg"] = "red"
tile["fg"] = "red"
tile.grid(row=i+1, column=0)
elif(i<18):
tile.grid(row=i-8, column=1)
map_tiles.append(tile)
elif(i<28):
tile.grid(row=i-18, column=2)
map_tiles.append(tile)
elif(i<38):
tile.grid(row=i-28, column=3)
map_tiles.append(tile)
else:
tile["bg"] = "red"
tile["fg"] = "red"
tile.grid(row=i-37, column=4)
map_tiles[9].config(bg="yellow", fg="yellow")
width = container.winfo_width()
height = container.winfo_height()
scroll_region.configure(scrollregion=(0,0,width,700))
#scroll_region.configure(scrollregion=scroll_region.bbox("all"))
# Binds callback methods to the buttons
root.bind('<Up>', up_press)
root.bind('<Down>',back_press)
root.bind('<Left>',left_press)
root.bind('<Right>',right_press)
root.bind('<Escape>',exit_call)
root.bind('<Return>', start_call)
root.bind('<space>', hal_call)
root.configure(bg="azure")
root.resizable(width=FALSE, height=FALSE)
root.after(delay, data_callback)
root.mainloop()
| cspang1/4534-08 | src/supervisory/MS4/UI_Graph.py | Python | gpl-3.0 | 15,623 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
© Copyright 2015-2016, 3D Robotics.
mission_import_export.py:
This example demonstrates how to import and export files in the Waypoint file format
(http://qgroundcontrol.org/mavlink/waypoint_protocol#waypoint_file_format). The commands are imported
into a list, and can be modified before saving and/or uploading.
Documentation is provided at http://python.dronekit.io/examples/mission_import_export.html
"""
from __future__ import print_function
from dronekit import connect, Command
import time
#Set up option parsing to get connection string
import argparse
parser = argparse.ArgumentParser(description='Demonstrates mission import/export from a file.')
parser.add_argument('--connect',
help="Vehicle connection target string. If not specified, SITL automatically started and used.")
args = parser.parse_args()
connection_string = args.connect
sitl = None
#Start SITL if no connection string specified
if not connection_string:
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
# Connect to the Vehicle
print('Connecting to vehicle on: %s' % connection_string)
vehicle = connect(connection_string, wait_ready=True)
# Check that vehicle is armable.
# This ensures home_location is set (needed when saving WP file)
while not vehicle.is_armable:
print(" Waiting for vehicle to initialise...")
time.sleep(1)
def readmission(aFileName):
"""
Load a mission from a file into a list. The mission definition is in the Waypoint file
format (http://qgroundcontrol.org/mavlink/waypoint_protocol#waypoint_file_format).
This function is used by upload_mission().
"""
print("\nReading mission from file: %s" % aFileName)
cmds = vehicle.commands
missionlist=[]
with open(aFileName) as f:
for i, line in enumerate(f):
if i==0:
if not line.startswith('QGC WPL 110'):
raise Exception('File is not supported WP version')
else:
linearray=line.split('\t')
ln_index=int(linearray[0])
ln_currentwp=int(linearray[1])
ln_frame=int(linearray[2])
ln_command=int(linearray[3])
ln_param1=float(linearray[4])
ln_param2=float(linearray[5])
ln_param3=float(linearray[6])
ln_param4=float(linearray[7])
ln_param5=float(linearray[8])
ln_param6=float(linearray[9])
ln_param7=float(linearray[10])
ln_autocontinue=int(linearray[11].strip())
cmd = Command( 0, 0, 0, ln_frame, ln_command, ln_currentwp, ln_autocontinue, ln_param1, ln_param2, ln_param3, ln_param4, ln_param5, ln_param6, ln_param7)
missionlist.append(cmd)
return missionlist
def upload_mission(aFileName):
"""
Upload a mission from a file.
"""
#Read mission from file
missionlist = readmission(aFileName)
print("\nUpload mission from a file: %s" % aFileName)
#Clear existing mission from vehicle
print(' Clear mission')
cmds = vehicle.commands
cmds.clear()
#Add new mission to vehicle
for command in missionlist:
cmds.add(command)
print(' Upload mission')
vehicle.commands.upload()
def download_mission():
"""
Downloads the current mission and returns it in a list.
It is used in save_mission() to get the file information to save.
"""
print(" Download mission from vehicle")
missionlist=[]
cmds = vehicle.commands
cmds.download()
cmds.wait_ready()
for cmd in cmds:
missionlist.append(cmd)
return missionlist
def save_mission(aFileName):
"""
Save a mission in the Waypoint file format
(http://qgroundcontrol.org/mavlink/waypoint_protocol#waypoint_file_format).
"""
print("\nSave mission from Vehicle to file: %s" % aFileName)
#Download mission from vehicle
missionlist = download_mission()
#Add file-format information
output='QGC WPL 110\n'
#Add home location as 0th waypoint
home = vehicle.home_location
output+="%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (0,1,0,16,0,0,0,0,home.lat,home.lon,home.alt,1)
#Add commands
for cmd in missionlist:
commandline="%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (cmd.seq,cmd.current,cmd.frame,cmd.command,cmd.param1,cmd.param2,cmd.param3,cmd.param4,cmd.x,cmd.y,cmd.z,cmd.autocontinue)
output+=commandline
with open(aFileName, 'w') as file_:
print(" Write mission to file")
file_.write(output)
def printfile(aFileName):
"""
Print a mission file to demonstrate "round trip"
"""
print("\nMission file: %s" % aFileName)
with open(aFileName) as f:
for line in f:
print(' %s' % line.strip())
import_mission_filename = 'mpmission.txt'
export_mission_filename = 'exportedmission.txt'
#Upload mission from file
upload_mission(import_mission_filename)
#Download mission we just uploaded and save to a file
save_mission(export_mission_filename)
#Close vehicle object before exiting script
print("Close vehicle object")
vehicle.close()
# Shut down simulator if it was started.
if sitl is not None:
sitl.stop()
print("\nShow original and uploaded/downloaded files:")
#Print original file (for demo purposes only)
printfile(import_mission_filename)
#Print exported file (for demo purposes only)
printfile(export_mission_filename)
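# --- Hedged illustration (not part of the original example) ------------------
# Shape of the tab-separated "QGC WPL 110" file that readmission() parses and
# save_mission() writes (columns: index, current, frame, command, param1-4,
# lat, lon, alt, autocontinue); the waypoint values below are invented.
#
#   QGC WPL 110
#   0   1   0   16  0   0   0   0   -35.3632607 149.1652351 584.0   1
#   1   0   3   22  0.0 0.0 0.0 0.0 -35.3620000 149.1650000 20.0    1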
| dronekit/dronekit-python | examples/mission_import_export/mission_import_export.py | Python | apache-2.0 | 5,616 |
import resources.requires as Rq
from resources.roots import PUBLIC_ROOT
def requires():
Rq.required('bootstrap','https://github.com/twbs/bootstrap/releases/download/v3.3.7/bootstrap-3.3.7-dist.zip',PUBLIC_ROOT)
Rq.required('jquery','https://code.jquery.com/jquery-3.2.1.js',PUBLIC_ROOT)
#more requires here
#Rq.required(name,url,path)
print('All requires were installed')
| barjuegocreador93/pycab | pycabRequires.py | Python | gpl-3.0 | 391 |
from plugins import BasePlugin
from plugins import PluginsData
from etllib.conf import Conf
from etllib.db import DB
from etllib.metrics import Metrics
from etllib.json_helper import JSONHelper as jh
from operator import itemgetter
import json
import re
class Transformations:
def __init__(self, tr=[], first_field='eid', last_field='json'):
self.defaul_type = 'string'
self.defaul_regex = None
self.defaul_sub_key = 'year'
self.defaul_template = {}
self.defaul_is_sql_feild = False
self.first_field = first_field
self.last_field = last_field
self.t = {}
self.sql_fields = []
self.all_sql_fields = []
self.load(tr, first_field, last_field)
def load(self, tr, first_field, last_field):
for tr_item in tr:
field = tr_item['field']
ftype = tr_item.get('type', 'string')
fisql = tr_item.get('sql_field', False)
fregx = tr_item.get('regex', None)
            ftemp = tr_item.get('template', None)
            fsubk = tr_item.get('sub_key', self.defaul_sub_key)
            self.t[field] = {
                'type': ftype,
                'sql_field': fisql,
                'regex': fregx,
                'template': ftemp,
                'sub_key': fsubk
            }
if fisql:
self.sql_fields.append(field)
self.all_sql_fields = [first_field] + self.sql_fields + [last_field]
def get_type(self, key):
try:
return self.t[key]['type']
except:
return self.defaul_type
def get_regex(self, key):
try:
return self.t[key]['regex']
except:
return self.defaul_regex
def get_sub_key(self, key):
try:
return self.t[key]['sub_key']
except:
return self.defaul_sub_key
def get_template(self, key):
try:
return json.loads(self.t[key]['template'])
except:
if self.t[key]['type'] == 'map':
raise
return self.defaul_template
def is_sql_field(self, key):
try:
return self.t[key]['sql_field']
except:
return self.defaul_is_sql_feild
def get_sql_fields(self):
return self.sql_fields
def get_all_sql_fields(self):
return self.all_sql_fields
def encode(self, s):
try:
return unicode(s, errors='ignore').encode("utf-8")
except:
return s
def regex_extract(self, k, v):
regex_str = self.get_regex(k)
if regex_str:
try:
v = re.match(regex_str, v).group('extract')
except:
v = ''
return v
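# Illustrative sketch (added; not part of the original plugin): the shape of a
# rule's `transformations` list as read by Transformations.load() above. Field
# names and the regex are hypothetical; note that a 'map' template is a JSON
# string, and that a regex must expose a named group called 'extract'.
#
# _example_transformations = [
#     {'field': 'planname', 'type': 'string', 'sql_field': True},
#     {'field': 'keywords', 'type': 'list'},
#     {'field': 'budget', 'type': 'map', 'template': '{"2019": 0.0, "2020": 0.0}'},
#     {'field': 'code', 'type': 'string', 'regex': r'^(?P<extract>[A-Z]+)-\d+$'},
# ]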
class JsonizerPlugin(BasePlugin):
def init(self, rule):
self.match_rules = []
self.transformations = None
def get_params(self, rule):
return rule['action'].get('params', {})
def load_transformations(self, rule, first_field, last_field):
t = rule['action'].get('transformations', {})
self.transformations = Transformations(t, first_field, last_field)
def get_sql(self, rule):
params = self.get_params(rule)
sql = rule['action'].get('data', '')
for k, v in params.items():
p = '${0}'.format(k)
v = str(v)
sql = sql.replace(p, v)
return sql
def make_entities(self, data):
e = {}
for record in data.values:
#eid = record[data.get_field_id('planid')]
eid = record[0]
k = record[data.get_field_id('k')]
v = record[data.get_field_id('v')]
v = self.transformations.encode(v)
v = self.transformations.regex_extract(k,v)
if self.transformations.get_type(k) == 'list':
e[eid] = e.get(eid, {
'id': eid
})
e[eid][k] = e[eid].get(k, [])
e[eid][k].append(v)
elif self.transformations.get_type(k) == 'map':
e[eid] = e.get(eid, {
'id': eid
})
template = self.transformations.get_template(k)
sub_k_name = self.transformations.get_sub_key(k)
sub_k = record[data.get_field_id(sub_k_name)]
e[eid][k] = e[eid].get(k, template)
if not template:
e[eid][k][sub_k] = v
else:
sub_k = type(template.keys()[0])(sub_k)
if sub_k in e[eid][k].keys():
e[eid][k][sub_k] = type(template[sub_k])(v)
else:
e[eid] = e.get(eid, {
'id': eid
})
regex_str = self.transformations.get_regex(k)
e[eid][k] = v
return e
def package_entities(self, entities):
e = PluginsData()
allsqlfields = self.transformations.get_all_sql_fields()
subsqlfields = self.transformations.get_sql_fields()
e.fields = allsqlfields
e.values = []
for eid in entities:
eid_str = str(eid)
try:
ejson_str = json.dumps(entities[eid])
except:
print entities[eid]
raise
v = [eid_str] + [
entities[eid].get(sqlfield, '')
for sqlfield in subsqlfields
] + [ejson_str]
e.values.append(tuple(v))
return e
def load_data(self, rule):
self.db = DB(config=rule['source_node'])
sql_str = self.get_sql(rule)
#self.print_debug(sql_str)
data_item = PluginsData()
self.db.execute(sql_str)
data_item.fields = DB.field_names(self.db.cursor)
data_item.values = self.db.cursor.fetchall()
self.print_debug('Loaded {0} rows'.format(
len(data_item.values)
)
)
return data_item
def run(self, rule, data=None):
data = self.load_data(rule)
first_field = data.fields[0]
last_field = 'json'
self.load_transformations(rule, first_field, last_field)
e = self.make_entities(data)
return self.package_entities(e)
def init(rule):
return JsonizerPlugin(rule)
| gr33ndata/rivellino | plugins/jsonizer_plugin.py | Python | mit | 6,285 |
# coding=utf-8
import unittest
"""675. Cut Off Trees for Golf Event
https://leetcode.com/problems/cut-off-trees-for-golf-event/description/
You are asked to cut off trees in a forest for a golf event. The forest is
represented as a non-negative 2D map, in this map:
1. `0` represents the `obstacle` can't be reached.
2. `1` represents the `ground` can be walked through.
3. `The place with number bigger than 1` represents a `tree` can be walked through, and this positive number represents the tree's height.
You are asked to cut off **all** the trees in this forest in the order of
tree's height - always cut off the tree with lowest height first. And after
cutting, the original place has the tree will become a grass (value 1).
You will start from the point (0, 0) and you should output the minimum steps
**you need to walk** to cut off all the trees. If you can't cut off all the
trees, output -1 in that situation.
You are guaranteed that no two `trees` have the same height and there is at
least one tree needs to be cut off.
**Example 1:**
**Input:**
[
[1,2,3],
[0,0,4],
[7,6,5]
]
**Output:** 6
**Example 2:**
**Input:**
[
[1,2,3],
[0,0,0],
[7,6,5]
]
**Output:** -1
**Example 3:**
**Input:**
[
[2,3,4],
[0,0,5],
[8,7,6]
]
**Output:** 6
**Explanation:** You started from the point (0,0) and you can cut off the tree in (0,0) directly without walking.
**Hint** : size of the given matrix will not exceed 50x50.
Similar Questions:
"""
class Solution(object):
def cutOffTree(self, forest):
"""
:type forest: List[List[int]]
:rtype: int
"""
def test(self):
pass
if __name__ == "__main__":
unittest.main()
| openqt/algorithms | leetcode/python/lc675-cut-off-trees-for-golf-event.py | Python | gpl-3.0 | 1,872 |
import math
import time
t1 = time.time()
def f(x):
return math.floor(math.pow(2,30.403243784-x*x))/math.pow(10,9)
'''
a = -1
for i in range(2000):
a = f(a)
print(a)
'''
# The iteration u_{n+1} = f(u_n) quickly settles into a 2-cycle: a pair a, b
# with a = f(b) and b = f(a). The required u_n + u_(n+1) is then simply a + b.
N = int(math.pow(10,12))  # the problem asks for n = 10**12; the cycle appears long before
t = [0,0]                 # the two alternating values of the eventual 2-cycle
fp = True                 # which slot of t the next iterate belongs to
a = -1                    # u_0 = -1
for i in range(N):
a = f(a)
if fp:
if t[0] == a:
break
t[0] = a
else:
if t[1] == a:
break
t[1] = a
fp = not fp
print(sum(t))
print("time:",time.time()-t1)
| Adamssss/projectEuler | pb197.py | Python | mit | 519 |
"""Util function to baseline correct data
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
from .utils import logger, verbose
@verbose
def rescale(data, times, baseline, mode, verbose=None, copy=True):
"""Rescale aka baseline correct data
Parameters
----------
data : array
It can be of any shape. The only constraint is that the last
dimension should be time.
times : 1D array
        Time instants in seconds.
baseline : tuple or list of length 2, or None
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) the whole time
        interval is used. If None, no correction is applied.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
copy : bool
Operate on a copy of the data, or in place.
Returns
-------
data_scaled: array
Array of same shape as data after rescaling.
"""
if copy:
data = data.copy()
valid_modes = ['logratio', 'ratio', 'zscore', 'mean', 'percent']
if mode not in valid_modes:
raise Exception('mode should be any of : %s' % valid_modes)
if baseline is not None:
logger.info("Applying baseline correction ... (mode: %s)" % mode)
bmin, bmax = baseline
if bmin is None:
imin = 0
else:
imin = int(np.where(times >= bmin)[0][0])
if bmax is None:
imax = len(times)
else:
imax = int(np.where(times <= bmax)[0][-1]) + 1
# avoid potential "empty slice" warning
if data.shape[-1] > 0:
mean = np.mean(data[..., imin:imax], axis=-1)[..., None]
else:
mean = 0 # otherwise we get an ugly nan
if mode == 'mean':
data -= mean
if mode == 'logratio':
data /= mean
data = np.log10(data) # a value of 1 means 10 times bigger
if mode == 'ratio':
data /= mean
elif mode == 'zscore':
std = np.std(data[..., imin:imax], axis=-1)[..., None]
data -= mean
data /= std
elif mode == 'percent':
data -= mean
data /= mean
else:
logger.info("No baseline correction applied...")
return data
| jaeilepp/eggie | mne/baseline.py | Python | bsd-2-clause | 2,975 |
import os
from typing import Dict, Optional, Tuple, List
import numpy as np
import tensorflow as tf
import nn_utils.math_utils as math_utils
from losses import multi_gpu_wrapper
from models.neural_pil.embd_store import EmbeddingStore
from models.neural_pil.models import CoarseModel, FineModel
from nn_utils.nerf_layers import add_base_args
from utils.training_setup_utils import (
StateRestoration,
StateRestorationItem,
get_num_gpus,
)
class NeuralPILModel(tf.keras.Model):
def __init__(self, num_images, args, **kwargs):
super(NeuralPILModel, self).__init__(**kwargs)
# Setup the models
self.fine_model = FineModel(args, **kwargs)
illumination_latent_dim = self.fine_model.illumination_net.latent_units
self.coarse_model = CoarseModel(illumination_latent_dim, args, **kwargs)
self.rotating_object = args.rotating_object
# Randomize if training
self.randomized = args.perturb == 1.0
print("Running with pertubation:", self.randomized)
self.advanced_loss_done = args.advanced_loss_done
# Setup the place where the SGs are stored
self.single_env = args.single_env
num_illuminations = 1 if args.single_env else num_images
mean_std = np.load(
os.path.join(
args.illumination_network_path, "illumination_latent_mean_std.npy"
),
allow_pickle=True,
)
self.illumination_embedding_store = EmbeddingStore(
num_illuminations,
illumination_latent_dim,
latent_mean=mean_std[0],
latent_std=mean_std[1],
)
self.illumination_embedding_store(
tf.convert_to_tensor([0])
) # Ensure the store is built
# Add loss for wb
self.num_gpu = max(1, get_num_gpus())
self.global_batch_size = args.batch_size * self.num_gpu
self.mse = multi_gpu_wrapper(
tf.keras.losses.MeanSquaredError,
self.global_batch_size,
)
self.cosine_similarity = multi_gpu_wrapper(
tf.keras.losses.CosineSimilarity, self.global_batch_size
)
# Setup the state restoration
states = [
StateRestorationItem("coarse", self.coarse_model),
StateRestorationItem("fine", self.fine_model),
StateRestorationItem("illuminations", self.illumination_embedding_store),
]
self.state_restoration = StateRestoration(args, states)
def save(self, step):
# Save weights for step
self.state_restoration.save(step)
def restore(self, step: Optional[int] = None) -> int:
# Restore weights from step or if None the latest one
return self.state_restoration.restore(step)
@tf.function
def call(
self,
ray_origins: tf.Tensor,
ray_directions: tf.Tensor,
camera_pose: tf.Tensor,
near_bound: float,
far_bound: float,
illumination_idx: tf.Tensor,
ev100: tf.Tensor,
illumination_factor: tf.Tensor,
training=False,
illumination_context_override=None,
high_quality=False,
) -> Tuple[Dict[str, tf.Tensor], Dict[str, tf.Tensor]]:
"""Evaluate the network for given ray origins and directions and camera pose
Args:
ray_origins (tf.Tensor(float32), [batch, 3]): the ray origin.
            ray_directions (tf.Tensor(float32), [batch, 3]): the ray direction.
camera_pose (tf.Tensor(float32), [batch, 3, 3]): the camera matrix.
near_bound (float): the near clipping point.
far_bound (float): the far clipping point.
illumination_idx (tf.Tensor(int32), [1]): the illumination index.
ev100 (tf.Tensor(float32), [1]): the ev100 value of the image.
training (bool, optional): Whether this is a training step or not.
                Activates noise and ray perturbation if requested. Defaults to False.
Returns:
coarse_payload (Dict[str, tf.Tensor]): dict with the payload for the coarse
network.
fine_payload (Dict[str, tf.Tensor]): dict with the payload for the fine
network.
"""
# Get current embedding
if illumination_context_override is None:
illumination_context = self.illumination_embedding_store(
illumination_idx if not self.single_env else tf.convert_to_tensor([0])
)
else:
illumination_context = illumination_context_override
# Coarse step
(
coarse_payload,
coarse_z_samples,
coarse_weights,
) = self.coarse_model.render_rays(
ray_origins,
ray_directions,
near_bound,
far_bound,
tf.stop_gradient(illumination_context),
randomized=training and self.randomized,
overwrite_num_samples=(self.coarse_model.num_samples * 2)
if high_quality
else None,
)
fine_payload, _, _ = self.fine_model.render_rays(
ray_origins,
ray_directions,
coarse_z_samples,
coarse_weights,
camera_pose,
illumination_context,
ev100,
illumination_factor,
randomized=training and self.randomized,
overwrite_num_samples=(self.fine_model.num_samples * 2)
if high_quality
else None,
)
return coarse_payload, fine_payload
def distributed_call(
self,
strategy,
chunk_size: int,
ray_origins: tf.Tensor,
ray_directions: tf.Tensor,
camera_pose: tf.Tensor,
near_bound: float,
far_bound: float,
illumination_idx: tf.Tensor,
ev100: tf.Tensor,
illumination_factor: tf.Tensor,
training=False,
illumination_context_override=None,
high_quality=False,
):
if illumination_context_override is not None:
illumination_idx = tf.cast(
tf.ones_like(illumination_idx) * illumination_context_override, tf.int32
)
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.DATA
)
dp_df = (
tf.data.Dataset.from_tensor_slices((ray_origins, ray_directions))
.batch(chunk_size // (2 if high_quality else 1) * get_num_gpus())
.with_options(options)
)
dp_dist_df = strategy.experimental_distribute_dataset(dp_df)
coarse_payloads: Dict[str, List[tf.Tensor]] = {}
fine_payloads: Dict[str, List[tf.Tensor]] = {}
def add_to_dict(to_add, main_dict):
for k, v in to_add.items():
arr = main_dict.get(
k,
[],
)
arr.extend(v)
main_dict[k] = arr
return main_dict
for dp in dp_dist_df:
rays_o, rays_d = dp
# Render image.
coarse_result_per_replica, fine_result_per_replica = strategy.run(
self.call,
(
rays_o,
rays_d,
camera_pose,
near_bound,
far_bound,
illumination_idx,
ev100,
illumination_factor,
training,
illumination_context_override,
high_quality,
),
)
coarse_result = {
k: strategy.experimental_local_results(v)
for k, v in coarse_result_per_replica.items()
}
fine_result = {
k: strategy.experimental_local_results(v)
for k, v in fine_result_per_replica.items()
}
coarse_payloads = add_to_dict(coarse_result, coarse_payloads)
fine_payloads = add_to_dict(fine_result, fine_payloads)
coarse_payloads = {k: tf.concat(v, 0) for k, v in coarse_payloads.items()}
fine_payloads = {k: tf.concat(v, 0) for k, v in fine_payloads.items()}
return coarse_payloads, fine_payloads
@tf.function
def train_step(
self,
ray_origins: tf.Tensor,
ray_directions: tf.Tensor,
camera_pose: tf.Tensor,
near_bound: float,
far_bound: float,
illumination_idx: tf.Tensor,
ev100: tf.Tensor,
illumination_factor: tf.Tensor,
is_wb_ref_image: tf.Tensor,
wb_input_value: tf.Tensor,
optimizer: tf.keras.optimizers.Optimizer,
target: tf.Tensor,
target_mask: tf.Tensor,
lambda_advanced_loss: tf.Tensor,
lambda_slow_fade_loss: tf.Tensor,
) -> Tuple[tf.Tensor, tf.Tensor, Dict[str, tf.Tensor], Dict[str, tf.Tensor]]:
"""Perform a single training step.
Args:
ray_origins (tf.Tensor(float32), [batch, 3]): the ray origin.
ray_directions (tf.Tensor(float32), [batch, 3]): the ray direction.
camera_pose (tf.Tensor(float32), [batch, 3, 3]): the camera matrix.
near_bound (tf.Tensor(float32), [1]): the near clipping point.
far_bound (tf.Tensor(float32), [1]): the far clipping point.
illumination_idx (tf.Tensor(int32), [1]): the illumination index.
ev100 (tf.Tensor(float32), [1]): the ev100 value of the image.
is_wb_ref_image (tf.Tensor(bool) [1]): whether the current image is
a reference whitebalance image.
wb_input_value (tf.Tensor(float32) [1, 3]): if `is_wb_ref_image` then
                this defines the whitebalance value.
optimizer (tf.keras.optimizers.Optimizer): the optimizer to use in the
train step.
target (tf.Tensor(float32), [batch, 3]): the rgb target from the image.
target_mask (tf.Tensor(float32), [batch, 1]): the segmentation mask
target from the image.
lambda_advanced_loss (tf.Tensor(float32), [1]): current advanced loss
interpolation value.
Returns:
            loss (tf.Tensor(float32), [1]): the joint loss.
            wb_loss (tf.Tensor(float32), [1]): the white balance reference loss
                (zero if the image is not a white balance reference).
coarse_losses (Dict[str, tf.Tensor]): a dict of loss names with the
evaluated losses. "loss" stores the final loss of the layer.
fine_losses (Dict[str, tf.Tensor]): a dict of loss names with the evaluated
losses. "loss" stores the final loss of the layer.
"""
with tf.GradientTape() as tape:
wb_loss = float(0)
if is_wb_ref_image[0]:
illumination_context = self.illumination_embedding_store(
illumination_idx
if not self.single_env
else tf.convert_to_tensor([0])
)
wb_scene = math_utils.saturate(
self.fine_model.get_white_balance_under_illumination(
illumination_context,
ray_origins,
)
* illumination_factor
* math_utils.ev100_to_exp(ev100)
)
wb_loss = self.mse(wb_input_value, wb_scene)
coarse_result, fine_result = self.call(
ray_origins,
ray_directions,
camera_pose,
near_bound,
far_bound,
illumination_idx,
ev100,
illumination_factor,
training=True,
)
coarse_losses = self.coarse_model.calculate_losses(
coarse_result, target, target_mask, lambda_advanced_loss
)
view_vector = math_utils.normalize(-1 * ray_directions)
fine_losses = self.fine_model.calculate_losses(
fine_result,
target,
target_mask,
view_vector,
lambda_advanced_loss,
lambda_slow_fade_loss,
)
loss = coarse_losses["loss"] + fine_losses["loss"] + wb_loss
grad_vars = (
self.coarse_model.trainable_variables
+ self.fine_model.trainable_variables
+ self.illumination_embedding_store.trainable_variables
)
gradients = tape.gradient(loss, grad_vars)
gradients, global_norm = tf.clip_by_global_norm(gradients, 1.0)
optimizer.apply_gradients(zip(gradients, grad_vars))
return loss, wb_loss, coarse_losses, fine_losses
@tf.function
def illumination_single_step(
self,
camera_pose,
ray_directions,
diffuse,
specular,
roughness,
normal,
alpha,
illumination_idx,
target,
ev100,
illumination_factor,
mse,
optimizer,
):
with tf.name_scope("IlluminationSingleStep"):
with tf.name_scope("Prepare"):
is_background = alpha < 0.3
select_on_background = lambda x, y: tf.where(
math_utils.repeat(is_background, tf.shape(x)[-1], -1),
x,
y,
)
with tf.name_scope("Directions"):
view_directions = -1 * ray_directions
org_viewdirections = view_directions
(
view_directions,
reflection_direction,
) = self.fine_model.renderer.calculate_reflection_direction(
view_directions,
normal,
camera_pose=camera_pose[0] if self.rotating_object else None,
)
view_directions = select_on_background(
org_viewdirections, view_directions
)
reflection_direction = select_on_background(
math_utils.normalize(ray_directions),
reflection_direction,
)
specular_roughness = select_on_background(
tf.zeros_like(roughness),
roughness,
)
with tf.name_scope("Execute"):
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.illumination_embedding_store.trainable_variables)
# Get current embedding
illumination_context = self.illumination_embedding_store(
illumination_idx
if not self.single_env
else tf.convert_to_tensor([0])
)
with tf.name_scope("Illumination"):
# Illumination net expects a B, S, C shape.
# Add a fake one and remove b dim afterward
diffuse_irradiance = (
self.fine_model.illumination_net.call_multi_samples(
reflection_direction[None, ...],
tf.ones_like( # Just sample with maximum roughness
roughness[None, ...]
),
illumination_context,
)[0]
)
# Illumination net expects a B, S, C shape.
# Add a fake one and remove b dim afterward
specular_irradiance = (
self.fine_model.illumination_net.call_multi_samples(
reflection_direction[None, ...],
specular_roughness[None, ...],
illumination_context,
)[0]
)
with tf.name_scope("Render"):
render = (
self.fine_model.renderer(
view_directions,
normal,
diffuse_irradiance,
specular_irradiance,
diffuse,
specular,
roughness,
)
* illumination_factor
)
with tf.name_scope("RenderPostProcess"):
# Replace background with illumination evaluation
render = select_on_background(specular_irradiance, render)
# Auto exposure + srgb to model camera setup
render = math_utils.white_background_compose(
self.fine_model.camera_post_processing(render, ev100), alpha
)
with tf.name_scope("Loss"):
illum_loss = mse(
math_utils.white_background_compose(
tf.reshape(target, (-1, 3)), alpha
),
render,
)
tf.debugging.check_numerics(illum_loss, "loss illum")
grad_vars = self.illumination_embedding_store.trainable_variables
gradients = tape.gradient(illum_loss, grad_vars)
optimizer.apply_gradients(zip(gradients, grad_vars))
return illum_loss
def illumination_steps(
self,
ray_origins: tf.Tensor,
ray_directions: tf.Tensor,
camera_pose: tf.Tensor,
near_bound: float,
far_bound: float,
illumination_idx: tf.Tensor,
ev100: tf.Tensor,
illumination_factor: tf.Tensor,
optimizer: tf.keras.optimizers.Optimizer,
target: tf.Tensor,
steps: int,
chunk_size: int = 1024,
strategy=tf.distribute.get_strategy(),
) -> tf.Tensor:
"""Perform a illumination optimization step. This only performs the illumination
with a fixed network.
Args:
ray_origins (tf.Tensor(float32), [batch, 3]): the ray origin.
ray_directions (tf.Tensor(float32), [batch, 3]): the ray direction.
camera_pose (tf.Tensor(float32), [batch, 3, 3]): the camera matrix.
near_bound (tf.Tensor(float32), [1]): the near clipping point.
far_bound (tf.Tensor(float32), [1]): the far clipping point.
illumination_idx (tf.Tensor(int32), [1]): the illumination index.
ev100 (tf.Tensor(float32), [1]): the ev100 value of the image.
optimizer (tf.keras.optimizers.Optimizer): the optimizer to use in the
train step.
target (tf.Tensor(float32), [batch, 3]): the rgb target from the image.
steps (int): the number of optimization steps to perform.
chunk_size (int): If specified runs the sampling in
batches. Runs everything jointly if 0.
Returns:
tf.Tensor(float32), [1]: the loss after the optimization
"""
mse = multi_gpu_wrapper(tf.keras.losses.MeanSquaredError, target.shape[0])
_, fine_result = self.distributed_call(
strategy,
chunk_size,
ray_origins,
ray_directions,
camera_pose,
near_bound,
far_bound,
illumination_idx,
ev100,
illumination_factor,
False,
)
data = [
ray_directions,
target,
fine_result["diffuse"],
fine_result["specular"],
fine_result["roughness"],
fine_result["normal"],
fine_result["acc_alpha"][..., None],
]
dp_df = tf.data.Dataset.from_tensor_slices((*data,)).batch(
chunk_size * get_num_gpus()
)
dp_dist_df = strategy.experimental_distribute_dataset(dp_df)
for i in tf.range(steps):
total_loss = 0
for dp in dp_dist_df:
ray_d, trgt, diff, spec, rgh, nrm, alp = dp
illum_loss_per_replica = strategy.run(
self.illumination_single_step,
(
camera_pose[:1],
ray_d,
diff,
spec,
rgh,
nrm,
alp,
illumination_idx[:1],
trgt,
ev100,
illumination_factor,
mse,
optimizer,
),
)
illum_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, illum_loss_per_replica, axis=None
)
total_loss = total_loss + illum_loss
return total_loss
def calculate_illumination_factor(
self, camera_position, ev100_target, illumination_context_overwrite=None
):
if illumination_context_overwrite is None:
illumination_context = self.illumination_embedding_store.latent_mean[
None, :
]
else:
illumination_context = illumination_context_overwrite
lum_volume = tf.reduce_mean(
self.fine_model.get_white_balance_under_illumination(
illumination_context, camera_position
)
)
target = 0.8 / tf.maximum(math_utils.ev100_to_exp(ev100_target), 1e-5)
factor = target / lum_volume
return factor
@classmethod
def add_args(cls, parser):
"""Add the base nerf arguments to the parser with addition
to the specific Neural-PIL ones.
Args:
parser (ArgumentParser): the current ArgumentParser.
Returns:
ArgumentParser: the modified ArgumentParser for call chaining
"""
add_base_args(parser)
parser.add_argument(
"--coarse_samples",
type=int,
default=64,
help="number of coarse samples per ray in a fixed grid",
)
parser.add_argument(
"--fine_samples",
type=int,
default=128,
help="number of additional samples per ray based on the coarse samples",
)
parser.add_argument(
"--fourier_frequency",
type=int,
default=10,
help="log2 of max freq for positional encoding",
)
parser.add_argument(
"--net_width", type=int, default=256, help="channels per layer"
)
parser.add_argument(
"--net_depth", type=int, default=8, help="layers in network"
)
# Illumination configs
parser.add_argument(
"--rotating_object",
action="store_true",
help=(
"The object is rotating instead of the camera. The illumination then "
"needs to stay static"
),
)
parser.add_argument(
"--single_env",
action="store_true",
help="All input images are captured under a single environment",
)
# Render configs
parser.add_argument(
"--brdf_preintegration_path",
default="data/neural_pil/BRDFLut.hdr",
help="Path to the preintegrated BRDF LUT.",
)
# Coarse configs
parser.add_argument(
"-lindisp",
"--linear_disparity_sampling",
action="store_true",
help="Coarse sampling linearly in disparity rather than depth",
)
# Fine configs
parser.add_argument(
"--brdf_network_path",
default="data/neural_pil/brdf-network",
help="Path to the BRDF decoder config and weights",
)
parser.add_argument(
"--illumination_network_path",
default="data/neural_pil/illumination-network",
help="Path to the illumination network config and weights",
)
parser.add_argument(
"--direct_rgb",
action="store_true",
help=(
"Also performs a direct RGB color prediction. This is useful in the "
"beginning of the training."
),
)
parser.add_argument(
"--advanced_loss_done",
type=int,
default=60000,
help=(
"Exponentially decays losses. After this many steps the loss is reduced"
"by 3 magnitudes"
),
)
parser.add_argument("--ablate_brdf_smae", action="store_true")
return parser
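# Illustrative sketch (added; not part of the original training code): how the
# argument registration above is typically attached to a CLI parser. Building
# the model itself additionally needs the pretrained BRDF/illumination network
# weights, so only the parser wiring is shown here.
if __name__ == "__main__":  # pragma: no cover
    import argparse

    _parser = NeuralPILModel.add_args(argparse.ArgumentParser())
    _parser.print_usage()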
| cgtuebingen/Neural-PIL | models/neural_pil/neural_pil_model.py | Python | mit | 25,376 |
'''
Create a PDF copy with split-up pages (posterize)
---------------------------------------------------
License: GNU GPL V3
(c) 2018 Jorj X. McKie
Usage
------
python posterize.py input.pdf
Result
-------
The file "poster-input.pdf" with 4 output pages for every input page:
top-left, top-right, bottom-left, bottom-right parts on separate pages.
Notes
-----
(1) Output file is chosen to have page dimensions of 1/4 of input.
(2) Easily adapt the example to make n pages per input, or decide per each
input page or whatever.
Dependencies
------------
PyMuPDF 1.12.2 or later
'''
from __future__ import print_function
import fitz, sys
infile = sys.argv[1] # input file name
src = fitz.open(infile)
doc = fitz.open() # empty output PDF
for spage in src: # for each page in input
xref = 0 # force initial page copy to output
r = spage.rect # input page rectangle
d = fitz.Rect(spage.CropBoxPosition, # CropBox displacement if not
spage.CropBoxPosition) # starting at (0, 0)
#--------------------------------------------------------------------------
# example: cut input page into 2 x 2 parts
#--------------------------------------------------------------------------
r1 = r * 0.5 # top left rect
r2 = r1 + (r1.width, 0, r1.width, 0) # top right rect
r3 = r1 + (0, r1.height, 0, r1.height) # bottom left rect
r4 = fitz.Rect(r1.br, r.br) # bottom right rect
rect_list = [r1, r2, r3, r4] # put them in a list
for rx in rect_list: # run thru rect list
rx += d # add the CropBox displacement
page = doc.newPage(-1, # new output page with rx dimensions
width = rx.width,
height = rx.height)
xref = page.showPDFpage(page.rect, # fill all new page with the image
src, # input document
spage.number, # input page number
clip = rx, # which part to use of input page
reuse_xref = xref) # copy input page once only
# that's it, save output file
doc.save("poster-" + src.name,
garbage = 4, # eliminate duplicate objects
deflate = True) # compress stuff where possible
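# Added sketch (not in the original example), following note (2) of the module
# docstring: a generic n x m split of a page rectangle, for producing more than
# 2 x 2 tiles per input page. Tiles are returned row by row, top-left first.
def tile_rects(rect, nx, ny):
    w, h = rect.width / nx, rect.height / ny
    return [fitz.Rect(rect.x0 + i * w, rect.y0 + j * h,
                      rect.x0 + (i + 1) * w, rect.y0 + (j + 1) * h)
            for j in range(ny) for i in range(nx)]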
| JorjMcKie/PyMuPDF-Utilities | examples/posterize.py | Python | gpl-3.0 | 2,611 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
import pytest
import spack.cmd.find
from spack.util.pattern import Bunch
@pytest.fixture(scope='module')
def parser():
"""Returns the parser for the module command"""
prs = argparse.ArgumentParser()
spack.cmd.find.setup_parser(prs)
return prs
@pytest.fixture()
def specs():
s = []
return s
@pytest.fixture()
def mock_display(monkeypatch, specs):
"""Monkeypatches the display function to return its first argument"""
def display(x, *args, **kwargs):
specs.extend(x)
monkeypatch.setattr(spack.cmd.find, 'display_specs', display)
def test_query_arguments():
query_arguments = spack.cmd.find.query_arguments
# Default arguments
args = Bunch(
only_missing=False,
missing=False,
unknown=False,
explicit=False,
implicit=False
)
q_args = query_arguments(args)
assert 'installed' in q_args
assert 'known' in q_args
assert 'explicit' in q_args
assert q_args['installed'] is True
assert q_args['known'] is any
assert q_args['explicit'] is any
# Check that explicit works correctly
args.explicit = True
q_args = query_arguments(args)
assert q_args['explicit'] is True
args.explicit = False
args.implicit = True
q_args = query_arguments(args)
assert q_args['explicit'] is False
@pytest.mark.usefixtures('database', 'mock_display')
class TestFindWithTags(object):
def test_tag1(self, parser, specs):
args = parser.parse_args(['--tags', 'tag1'])
spack.cmd.find.find(parser, args)
assert len(specs) == 2
assert 'mpich' in [x.name for x in specs]
assert 'mpich2' in [x.name for x in specs]
def test_tag2(self, parser, specs):
args = parser.parse_args(['--tags', 'tag2'])
spack.cmd.find.find(parser, args)
assert len(specs) == 1
assert 'mpich' in [x.name for x in specs]
def test_tag2_tag3(self, parser, specs):
args = parser.parse_args(['--tags', 'tag2', '--tags', 'tag3'])
spack.cmd.find.find(parser, args)
assert len(specs) == 0
| wscullin/spack | lib/spack/spack/test/cmd/find.py | Python | lgpl-2.1 | 3,356 |
# Copyright (C) 2007, Red Hat, Inc.
# Copyright (C) 2007, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from gi.repository import Gtk
from gi.repository import Gdk
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.icon import Icon
from sugar3.graphics.xocolor import XoColor
from sugar3.graphics import style
from sugar3 import profile
from jarabe.frame import clipboard
from jarabe.frame.clipboardmenu import ClipboardMenu
from jarabe.frame.frameinvoker import FrameWidgetInvoker
from jarabe.frame.notification import NotificationIcon
import jarabe.frame
class ClipboardIcon(RadioToolButton):
__gtype_name__ = 'SugarClipboardIcon'
def __init__(self, cb_object, group):
RadioToolButton.__init__(self, group=group)
self.props.palette_invoker = FrameWidgetInvoker(self)
self.palette_invoker.props.toggle_palette = True
self._cb_object = cb_object
self.owns_clipboard = False
self.props.sensitive = False
self.props.active = False
self._notif_icon = None
self._current_percent = None
self._icon = Icon()
color = profile.get_color()
self._icon.props.xo_color = color
self.set_icon_widget(self._icon)
self._icon.show()
cb_service = clipboard.get_instance()
cb_service.connect('object-state-changed',
self._object_state_changed_cb)
cb_service.connect('object-selected', self._object_selected_cb)
child = self.get_child()
child.connect('drag_data_get', self._drag_data_get_cb)
self.connect('notify::active', self._notify_active_cb)
def create_palette(self):
palette = ClipboardMenu(self._cb_object)
palette.set_group_id('frame')
return palette
def get_object_id(self):
return self._cb_object.get_id()
def _drag_data_get_cb(self, widget, context, selection, target_type,
event_time):
target_atom = selection.get_target()
target_name = target_atom.name()
logging.debug('_drag_data_get_cb: requested target %s', target_name)
data = self._cb_object.get_formats()[target_name].get_data()
selection.set(target_atom, 8, data)
def _put_in_clipboard(self):
logging.debug('ClipboardIcon._put_in_clipboard')
if self._cb_object.get_percent() < 100:
raise ValueError('Object is not complete, cannot be put into the'
' clipboard.')
targets = self._get_targets()
if targets:
x_clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
if not x_clipboard.set_with_data(targets,
self._clipboard_data_get_cb,
self._clipboard_clear_cb,
targets):
logging.error('GtkClipboard.set_with_data failed!')
else:
self.owns_clipboard = True
def _clipboard_data_get_cb(self, x_clipboard, selection, info, targets):
selection_target = selection.get_target()
entries_targets = [entry.target for entry in targets]
if not str(selection_target) in entries_targets:
logging.warning('ClipboardIcon._clipboard_data_get_cb: asked %s'
' but only have %r.', selection_target,
entries_targets)
return
data = self._cb_object.get_formats()[str(selection_target)].get_data()
selection.set(selection_target, 8, data)
def _clipboard_clear_cb(self, x_clipboard, targets):
logging.debug('ClipboardIcon._clipboard_clear_cb')
self.owns_clipboard = False
def _object_state_changed_cb(self, cb_service, cb_object):
if cb_object != self._cb_object:
return
if cb_object.get_icon():
self._icon.props.icon_name = cb_object.get_icon()
if self._notif_icon:
self._notif_icon.props.icon_name = self._icon.props.icon_name
else:
self._icon.props.icon_name = 'application-octet-stream'
child = self.get_child()
child.connect('drag-begin', self._drag_begin_cb)
child.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,
self._get_targets(),
Gdk.DragAction.COPY)
if cb_object.get_percent() == 100:
self.props.sensitive = True
# Clipboard object became complete. Make it the active one.
if self._current_percent < 100 and cb_object.get_percent() == 100:
self.props.active = True
self.show_notification()
self._current_percent = cb_object.get_percent()
def _object_selected_cb(self, cb_service, object_id):
if object_id != self._cb_object.get_id():
return
self.props.active = True
self.show_notification()
logging.debug('ClipboardIcon: %r was selected', object_id)
def show_notification(self):
self._notif_icon = NotificationIcon()
self._notif_icon.props.icon_name = self._icon.props.icon_name
self._notif_icon.props.xo_color = \
XoColor('%s,%s' % (self._icon.props.stroke_color,
self._icon.props.fill_color))
frame = jarabe.frame.get_view()
frame.add_notification(self._notif_icon, Gtk.CornerType.BOTTOM_LEFT)
def _drag_begin_cb(self, widget, context):
# TODO: We should get the pixbuf from the icon, with colors, etc.
icon_theme = Gtk.IconTheme.get_default()
pixbuf = icon_theme.load_icon(self._icon.props.icon_name,
style.STANDARD_ICON_SIZE, 0)
Gtk.drag_set_icon_pixbuf(context, pixbuf, hot_x=pixbuf.props.width / 2,
hot_y=pixbuf.props.height / 2)
def _notify_active_cb(self, widget, pspec):
if self.props.active:
self._put_in_clipboard()
else:
self.owns_clipboard = False
def _get_targets(self):
targets = []
for format_type in self._cb_object.get_formats().keys():
targets.append(Gtk.TargetEntry.new(format_type,
Gtk.TargetFlags.SAME_APP, 0))
return targets
| tchx84/sugar | src/jarabe/frame/clipboardicon.py | Python | gpl-2.0 | 7,080 |
from django.contrib.syndication.views import Feed as BaseFeed
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
class GeoFeedMixin:
"""
This mixin provides the necessary routines for SyndicationFeed subclasses
to produce simple GeoRSS or W3C Geo elements.
"""
def georss_coords(self, coords):
"""
In GeoRSS coordinate pairs are ordered by lat/lon and separated by
a single white space. Given a tuple of coordinates, return a string
GeoRSS representation.
"""
return ' '.join('%f %f' % (coord[1], coord[0]) for coord in coords)
def add_georss_point(self, handler, coords, w3c_geo=False):
"""
Adds a GeoRSS point with the given coords using the given handler.
Handles the differences between simple GeoRSS and the more popular
W3C Geo specification.
"""
if w3c_geo:
lon, lat = coords[:2]
handler.addQuickElement('geo:lat', '%f' % lat)
handler.addQuickElement('geo:lon', '%f' % lon)
else:
handler.addQuickElement('georss:point', self.georss_coords((coords,)))
def add_georss_element(self, handler, item, w3c_geo=False):
"""Add a GeoRSS XML element using the given item and handler."""
# Getting the Geometry object.
geom = item.get('geometry')
if geom is not None:
if isinstance(geom, (list, tuple)):
# Special case if a tuple/list was passed in. The tuple may be
# a point or a box
box_coords = None
if isinstance(geom[0], (list, tuple)):
# Box: ( (X0, Y0), (X1, Y1) )
if len(geom) == 2:
box_coords = geom
else:
raise ValueError('Only should be two sets of coordinates.')
else:
if len(geom) == 2:
# Point: (X, Y)
self.add_georss_point(handler, geom, w3c_geo=w3c_geo)
elif len(geom) == 4:
# Box: (X0, Y0, X1, Y1)
box_coords = (geom[:2], geom[2:])
else:
raise ValueError('Only should be 2 or 4 numeric elements.')
# If a GeoRSS box was given via tuple.
if box_coords is not None:
if w3c_geo:
raise ValueError('Cannot use simple GeoRSS box in W3C Geo feeds.')
handler.addQuickElement('georss:box', self.georss_coords(box_coords))
else:
# Getting the lowercase geometry type.
gtype = str(geom.geom_type).lower()
if gtype == 'point':
self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)
else:
if w3c_geo:
raise ValueError('W3C Geo only supports Point geometries.')
# For formatting consistent w/the GeoRSS simple standard:
# http://georss.org/1.0#simple
if gtype in ('linestring', 'linearring'):
handler.addQuickElement('georss:line', self.georss_coords(geom.coords))
elif gtype in ('polygon',):
# Only support the exterior ring.
handler.addQuickElement('georss:polygon', self.georss_coords(geom[0].coords))
else:
raise ValueError('Geometry type "%s" not supported.' % geom.geom_type)
# ### SyndicationFeed subclasses ###
class GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super().rss_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class GeoAtom1Feed(Atom1Feed, GeoFeedMixin):
def root_attributes(self):
attrs = super().root_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super().rss_attributes()
attrs['xmlns:geo'] = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item, w3c_geo=True)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed, w3c_geo=True)
# ### Feed subclass ###
class Feed(BaseFeed):
"""
This is a subclass of the `Feed` from `django.contrib.syndication`.
This allows users to define a `geometry(obj)` and/or `item_geometry(item)`
methods on their own subclasses so that geo-referenced information may
placed in the feed.
"""
feed_type = GeoRSSFeed
def feed_extra_kwargs(self, obj):
return {'geometry': self._get_dynamic_attr('geometry', obj)}
def item_extra_kwargs(self, item):
return {'geometry': self._get_dynamic_attr('item_geometry', item)}
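# Illustrative sketch (added; not part of Django): a user-defined feed that
# geo-references each item. ``myapp`` and its ``City`` model (with a ``point``
# geometry field) are hypothetical; they are only referenced inside method
# bodies, so this class definition itself imports cleanly.
class ExampleCityFeed(Feed):
    title = "Cities"
    link = "/cities/feed/"
    description = "Recently added cities."

    def items(self):
        from myapp.models import City  # hypothetical app and model
        return City.objects.order_by('-name')[:10]

    def item_geometry(self, item):
        # Picked up by item_extra_kwargs() above and rendered as a georss:point.
        return item.point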
| sametmax/Django--an-app-at-a-time | ignore_this_directory/django/contrib/gis/feeds.py | Python | mit | 5,732 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Abstraction of the underlying connection to VC."""
from nova.vsa import fake
def get_connection():
# Return an object that is able to talk to VCs
return fake.FakeVcConnection()
| nii-cloud/dodai-compute | nova/vsa/connection.py | Python | apache-2.0 | 888 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2017, James R. Barlow (https://github.com/jbarlow83/)
"""
Support functions called by the C++ library binding layer. Not intended to be
called from Python, and subject to change at any time.
"""
from typing import Callable, Dict, Union
from warnings import warn
from pikepdf import Dictionary, Name, Pdf
def update_xmp_pdfversion(pdf: Pdf, version: str) -> None:
if Name.Metadata not in pdf.Root:
return # Don't create an empty XMP object just to store the version
with pdf.open_metadata(set_pikepdf_as_editor=False, update_docinfo=False) as meta:
if 'pdf:PDFVersion' in meta:
meta['pdf:PDFVersion'] = version
def _alpha(n: int) -> str:
"""Excel-style column numbering A..Z, AA..AZ..BA..ZZ.., AAA."""
if n < 1:
raise ValueError(f"Can't represent {n} in alphabetic numbering")
p = []
while n > 0:
n, r = divmod(n - 1, 26)
p.append(r)
base = ord('A')
ords = [(base + v) for v in reversed(p)]
return ''.join(chr(o) for o in ords)
def _roman(n: int) -> str:
"""Converts integer n to Roman numeral representation as a string."""
if not (1 <= n <= 5000):
raise ValueError(f"Can't represent {n} in Roman numerals")
roman_numerals = (
(1000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
)
roman = ""
for value, numeral in roman_numerals:
while n >= value:
roman += numeral
n -= value
return roman
LABEL_STYLE_MAP: Dict[Name, Callable[[int], str]] = {
Name.D: str,
Name.A: _alpha,
Name.a: lambda x: _alpha(x).lower(),
Name.R: _roman,
Name.r: lambda x: _roman(x).lower(),
}
def label_from_label_dict(label_dict: Union[int, Dictionary]) -> str:
"""Convert a label dictionary returned by QPDF into a text string."""
if isinstance(label_dict, int):
return str(label_dict)
label = ''
if Name.P in label_dict:
prefix = label_dict[Name.P]
label += str(prefix)
# If there is no S, return only the P portion
if Name.S in label_dict:
# St defaults to 1
numeric_value = label_dict[Name.St] if Name.St in label_dict else 1
if not isinstance(numeric_value, int):
warn(
"Page label dictionary has invalid non-integer start value", UserWarning
)
numeric_value = 1
style = label_dict[Name.S]
if isinstance(style, Name):
style_fn = LABEL_STYLE_MAP[style]
value = style_fn(numeric_value)
label += value
else:
warn("Page label dictionary has invalid page label style", UserWarning)
return label
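# Illustrative self-check (added; not part of the original module) for the
# private numbering helpers and the integer fast path of label_from_label_dict().
if __name__ == '__main__':  # pragma: no cover
    assert _alpha(1) == 'A' and _alpha(26) == 'Z' and _alpha(27) == 'AA'
    assert _roman(4) == 'IV' and _roman(1999) == 'MCMXCIX'
    assert label_from_label_dict(7) == '7'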
| pikepdf/pikepdf | src/pikepdf/_cpphelpers.py | Python | mpl-2.0 | 3,084 |
import numpy as np
def median_absolute_deviation(x, M=None) :
    """Median absolute deviation (MAD) of x about M (defaults to the median of x)."""
    if M is None:
        M = np.median(x)
    return np.median(abs(x - M))
def _biweight_location_work(x, M, MAD, c) :
u = (x-M) / (c*MAD)
w = abs(u) < 1.0
if w.sum() == 0.0:
return M
term = (1.0 - u[w]**2)**2
num = ((x[w]-M)*term).sum()
den = term.sum()
CBI = M + num/den
return CBI
def biweight_location(x, c=6.0, NaN=None, niter=4) :
    """Tukey biweight (robust) estimate of the central location of x.

    c is the tuning constant, NaN (if truthy) drops non-finite values first,
    and niter is the number of refinement iterations.
    """
if NaN:
x = x[np.isfinite(x)]
if len(x) == 0:
return np.NaN
CBI = np.median(x)
for i in xrange(niter):
M = CBI
MAD = median_absolute_deviation(x, M)
CBI = _biweight_location_work(x, M, MAD, c)
return CBI
def _biweight_scale_work(x, M, MAD, c) :
u = (x-M) / (c*MAD)
w = abs(u) < 1.0
if w.sum() == 0:
return np.NaN
term = u[w]**2
num = (len(x) * ((x[w]-M)**2 * (1-term)**4).sum())**0.5
den = abs(((1.0-term)*(1.0-5*term)).sum())
return num/den
def biweight_scale(x, zero=None, c=9.0, NaN=None, niter=4) :
    """Tukey biweight (robust) estimate of the scale (spread) of x.

    zero optionally fixes the assumed location; otherwise the biweight
    location of x is used. c is the tuning constant.
    """
if NaN:
x = x[np.isfinite(x)]
if zero is None:
M = biweight_location(x)
else:
M = zero
MAD = median_absolute_deviation(x, M)
SBI = MAD/0.6745
for i in xrange(niter):
MAD = SBI*0.6745
SBI = _biweight_scale_work(x, M, MAD, c)
return SBI
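# Illustrative usage sketch (added; not part of the original module): the robust
# location/scale estimates are barely affected by a pair of gross outliers.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    data = np.concatenate([rng.normal(10.0, 2.0, 1000), [1e6, -1e6]])
    print("location: %.3f  scale: %.3f" % (biweight_location(data),
                                           biweight_scale(data)))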
| jmeyers314/astrophotoreduce | biweight.py | Python | mit | 1,347 |