Dataset schema (⌀ marks a nullable column):

| column | type | min | max |
|---|---|---|---|
| hexsha | string | 40 | 40 |
| size | int64 | 3 | 1.03M |
| ext | string (10 classes) | | |
| lang | string (1 class) | | |
| max_stars_repo_path | string | 3 | 972 |
| max_stars_repo_name | string | 6 | 130 |
| max_stars_repo_head_hexsha | string | 40 | 78 |
| max_stars_repo_licenses | sequence | 1 | 10 |
| max_stars_count ⌀ | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | 24 | 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | 24 | 24 |
| max_issues_repo_path | string | 3 | 972 |
| max_issues_repo_name | string | 6 | 130 |
| max_issues_repo_head_hexsha | string | 40 | 78 |
| max_issues_repo_licenses | sequence | 1 | 10 |
| max_issues_count ⌀ | int64 | 1 | 116k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | 24 | 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | 24 | 24 |
| max_forks_repo_path | string | 3 | 972 |
| max_forks_repo_name | string | 6 | 130 |
| max_forks_repo_head_hexsha | string | 40 | 78 |
| max_forks_repo_licenses | sequence | 1 | 10 |
| max_forks_count ⌀ | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | 24 | 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | 24 | 24 |
| content | string | 3 | 1.03M |
| avg_line_length | float64 | 1.13 | 941k |
| max_line_length | int64 | 2 | 941k |
| alphanum_fraction | float64 | 0 | 1 |

Each row below lists these fields in order, with the `content` field holding the file source and the trailing `avg_line_length | max_line_length | alphanum_fraction` values closing the row.
766aabca8ded1833740d64e0a0444184a722c075 | 1,660 | py | Python | app/core/tests/test_admin.py | Chinmay-395/recipe-app-api | 76a6436ec3191b6d21986f627471606ae64bb357 | ["MIT"] | null | null | null | app/core/tests/test_admin.py | Chinmay-395/recipe-app-api | 76a6436ec3191b6d21986f627471606ae64bb357 | ["MIT"] | null | null | null | app/core/tests/test_admin.py | Chinmay-395/recipe-app-api | 76a6436ec3191b6d21986f627471606ae64bb357 | ["MIT"] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
""" it is is a function that is ran before every test """
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email="admin@londonappdev.com",
password='password@123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@londonappdev.com',
password='password@123',
name='Test user full name'
)
def test_user_listed(self):
""" Test that users are listed on user page """
url = reverse('admin:core_user_changelist')
res = self.client.get(url) # res stands for response
print("response of http request-------", res)
        # `assertContains` checks that the HTTP response is 200 and that
        # the given value appears in the content of `res`.
        self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 36.086957 | 69 | 0.615663 |
4bdaef7a74ca18d0e7ce0ccad9d5a5b82619fd37 | 20,210 | py | Python | tests/python/contrib/test_ethosu/infra.py | cli99/tvm | 6c6e873a1325a32418108daad6e38f3df8c37660 | ["Apache-2.0"] | null | null | null | tests/python/contrib/test_ethosu/infra.py | cli99/tvm | 6c6e873a1325a32418108daad6e38f3df8c37660 | ["Apache-2.0"] | null | null | null | tests/python/contrib/test_ethosu/infra.py | cli99/tvm | 6c6e873a1325a32418108daad6e38f3df8c37660 | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides infrastructure to verify the correctness of
the command stream produced.
Currently it will invoke vela to generate a vela-optimized tflite
in which the command stream is contained as a custom operator.
This class include methods to parse the custom operator to extract
the command stream and perform an equivalency check for single operator
test cases.
"""
from typing import List
import os
import struct
import numpy
import math
from enum import IntEnum
import tensorflow as tf
from ethosu.vela.register_command_stream_generator import CmdMode
from ethosu.vela.register_command_stream_generator import cmd0
from ethosu.vela.register_command_stream_generator import cmd1
import tvm
from tvm import relay
import tvm.relay.backend.contrib.ethosu.op as ethosu_ops
from tvm.topi.nn.utils import get_pad_tuple
from tests.python.relay.aot.aot_test_utils import (
AOTCompiledTestModel,
AOTDataLinkage,
AOTTestModel,
AOTTestRunner,
compile_models,
run_and_check,
)
class AttachType(IntEnum):
kGroupRoot = 1
kInline = 2
kInlinedAlready = 3
kScope = 4
kScanUpdate = 5
class VelaArtifacts:
def __init__(self):
self.cs = dict()
self.flash = dict()
self.sram = dict()
self.npu_ops = set()
def print_payload(payload):
cmds = deserialize_command_stream(payload)
for cmd_val in cmds:
cmd, val = parse_cmd(cmd_val)
s = str(cmd)
s = s.ljust(40)
s += str(val)
print(s)
def parse_cmd(binary_cmd):
code = binary_cmd[0] & 0x0000FFFF # lower 16 bits
param = binary_cmd[0] >> 16 # higher 16 bits
payload_mode = CmdMode(code & CmdMode.Mask)
if payload_mode == CmdMode.Payload32:
command = cmd1(code & CmdMode.CmdOpMask)
value = binary_cmd[1]
else:
command = cmd0(code & CmdMode.CmdOpMask)
value = param
return command, value
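# A minimal sketch of the 32-bit command-word layout parse_cmd() relies on. The
# word value below is made up for illustration: the low 16 bits hold the
# command/register code and the high 16 bits hold the immediate parameter of a
# cmd0-style command (cmd1-style commands read their value from the next word).
def _split_cmd_word_example(word=0x00050123):
    code = word & 0x0000FFFF  # -> 0x0123, what the CmdMode/cmd0/cmd1 lookup sees
    param = word >> 16  # -> 0x0005, the immediate parameter
    return code, param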
def check_cmms_equivalency(vela_cmd, vela_value, tvm_value, ignore_cmds=None):
if ignore_cmds is None:
ignore_cmds = []
if vela_value != tvm_value and vela_cmd not in ignore_cmds:
raise RuntimeError(
"ValueMismatch :: vela={}, tvm={} for command:{}".format(
vela_value, tvm_value, vela_cmd
)
)
def verify_cmms(cmms_tvm_blob, cmms_vela_blob):
vela_cmm = deserialize_command_stream(cmms_vela_blob)
tvm_cmm = deserialize_command_stream(cmms_tvm_blob)
cmms_zip = zip(vela_cmm, tvm_cmm)
first_ifm_found = False
last_ofm_found = False
ignore_commands = (
cmd1.NPU_SET_DMA0_SRC,
cmd1.NPU_SET_DMA0_DST,
cmd1.NPU_SET_WEIGHT_BASE,
cmd1.NPU_SET_OFM_BASE0,
cmd1.NPU_SET_IFM_BASE0,
cmd1.NPU_SET_SCALE_BASE,
)
ofm_region_params = []
ofm_bases = []
for vela_cmm, tvm_cmm in cmms_zip:
vela_cmd, vela_value = parse_cmd(vela_cmm)
tvm_cmd, tvm_value = parse_cmd(tvm_cmm)
assert vela_cmd == tvm_cmd
        # The first IFM region could be different, but it must be 1 for vela and 3 for tvm.
if vela_cmd == cmd0.NPU_SET_IFM_REGION and not first_ifm_found:
if vela_value == 1 and tvm_value == 3:
first_ifm_found = True
continue
if vela_cmd == cmd1.NPU_SET_IFM_BASE0 and not first_ifm_found:
if tvm_value != 0:
raise RuntimeError("ValueError :: tvm primary ifm base should be zero")
continue
# OFM regions should be cached to be checked later
if vela_cmd == cmd0.NPU_SET_OFM_REGION:
ofm_region_params.append((vela_value, tvm_value))
continue
# OFM bases should be cached to be checked later
if vela_cmd == cmd1.NPU_SET_OFM_BASE0:
ofm_bases.append((vela_value, tvm_value))
continue
check_cmms_equivalency(vela_cmd, vela_value, tvm_value, ignore_commands)
    # The last OFM region could be different, but it must be 1 for vela and 4 for tvm.
last_vela_ofm_region, last_tvm_ofm_region = ofm_region_params.pop(-1)
if not (last_vela_ofm_region == 1 and last_tvm_ofm_region == 4):
raise RuntimeError(
"ValueMismatch :: vela={}, tvm={} for last ofm region it should be 1 and 4 respectively".format(
last_vela_ofm_region, last_tvm_ofm_region
)
)
# The rest of the OFM regions should be the same.
for vela_value, tvm_value in ofm_region_params:
check_cmms_equivalency(vela_cmd, vela_value, tvm_value, ignore_commands)
# The last OFM base should be zero for tvm
_, last_tvm_ofm_base = ofm_bases.pop(-1)
if not last_tvm_ofm_base == 0:
raise RuntimeError("ValueError :: tvm primary ofm base should be zero")
def deserialize_command_stream(blob):
assert isinstance(blob, bytes)
payload_bytes = struct.unpack("<{0}I".format(len(blob) // 4), blob)
cmms = []
# remove_header
payload_bytes = payload_bytes[8:]
idx = 0
while idx < len(payload_bytes):
cmd = []
code = payload_bytes[idx]
idx += 1
cmd.append(code)
payload_mode = CmdMode(code & CmdMode.Mask)
if payload_mode == CmdMode.Payload32:
value = payload_bytes[idx]
idx += 1
cmd.append(value)
cmms.append(cmd)
return cmms
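# A sketch of the word framing deserialize_command_stream() expects, built with
# the already-imported `struct` module. The header and command words below are
# placeholders, not a valid Ethos-U command stream: the function unpacks
# little-endian uint32 words and discards the first 8 of them as the header.
def _pack_example_blob(command_words=(0x0, 0x1)):
    words = (0,) * 8 + tuple(command_words)  # 8 header words, then command words
    return struct.pack("<{0}I".format(len(words)), *words)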
def create_test_runner(accel="ethos-u55-256", enable_usmp=True):
file_dir = os.path.dirname(os.path.abspath(__file__))
test_root = os.path.join(file_dir, "reference_system")
_, ethosu_variant, ethosu_macs = accel.split("-")
ethosu_variant = ethosu_variant.upper()
return AOTTestRunner(
makefile="corstone300",
prologue="""
uart_init();
EthosuInit();
struct ethosu_driver* ethos_u = ethosu_reserve_driver();
""",
epilogue="""
ethosu_release_driver(ethos_u);
""",
includes=["uart.h", "ethosu_55.h", "ethosu_mod.h", "hard_fault.h"],
parameters={
"ETHOSU_TEST_ROOT": test_root,
"NPU_MACS": ethosu_macs,
"NPU_VARIANT": ethosu_variant,
},
pass_config={
"relay.ext.ethos-u.options": {
"accelerator_config": accel,
},
"tir.usmp.enable": enable_usmp,
},
)
def build_source(
module, inputs, outputs, accel="ethos-u55-256", output_tolerance=0, enable_usmp=True
):
test_runner = create_test_runner(accel, enable_usmp)
return compile_models(
models=AOTTestModel(
module=module,
inputs=inputs,
outputs=outputs,
output_tolerance=output_tolerance,
extra_memory_in_bytes=0,
),
interface_api="c",
use_unpacked_api=True,
workspace_byte_alignment=16,
pass_config=test_runner.pass_config,
)
def verify_source(
models: List[AOTCompiledTestModel],
accel="ethos-u55-256",
):
"""
This method verifies the generated source from an NPU module by building it and running on an FVP.
"""
interface_api = "c"
test_runner = create_test_runner(accel)
run_and_check(
models,
test_runner,
interface_api,
workspace_byte_alignment=16,
data_linkage=AOTDataLinkage(section="ethosu_scratch", alignment=16),
)
def flatten_numpy_data(data):
"""Flatten the numpy tensor to be single dimensional"""
total_elements = data.size
reshaped_data = numpy.reshape(data, [total_elements])
return reshaped_data
class InputGenerator:
def __init__(self, random_state):
self._random_state = random_state
def generate(self, size, dtype):
if dtype == numpy.float32:
print("random float32")
return self._random_state.uniform(-1, 1, size).astype(dtype)
else:
print("random (u)int min=%d max=%d", numpy.iinfo(dtype).min, numpy.iinfo(dtype).max)
low = numpy.iinfo(dtype).min
high = numpy.iinfo(dtype).max + 1
return self._random_state.randint(low, high, size, dtype)
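# Usage sketch (shape and dtype chosen arbitrarily): seeding the RandomState
# makes the generated tensors reproducible, e.g. int8 values drawn from the
# full [-128, 127] range, which is how the reference input data below is built.
def _example_random_int8(shape=(1, 4)):
    gen = InputGenerator(numpy.random.RandomState(0))
    return gen.generate(shape, numpy.int8)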
def generate_ref_data_tflite(model):
"""
This method generates reference data by running the specified model on tflite with random input data.
The random input data and generated output data are returned.
"""
expected_output_data = {}
interpreter = tf.lite.Interpreter(model_content=model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Initialize random generators with a fixed seed to get deterministic results
seed = 0
random_state = numpy.random.RandomState(seed)
inputgen = InputGenerator(random_state)
# Generate input data
input_data = {
input_detail["name"]: inputgen.generate(
input_detail["shape"],
input_detail["dtype"],
)
for input_detail in input_details
}
for index, value in enumerate(input_data.values()):
interpreter.set_tensor(index, value)
interpreter.invoke()
expected_output_data = {
output_detail["name"]: interpreter.get_tensor(output_detail["index"])
for output_detail in output_details
}
return input_data, expected_output_data
def make_partitioned_function(relay_op):
ifm0 = relay.analysis.free_vars(relay_op)
ifm_shape = ifm0[0].type_annotation.shape
ifm_dtype = ifm0[0].type_annotation.dtype
ifm = relay.var("ifm", shape=ifm_shape, dtype=ifm_dtype)
glb_ethosu = relay.GlobalVar("tvmgen_default_ethosu_main_0")
func = (
relay.Function(ifm0, relay_op)
.with_attr("Inline", 1)
.with_attr("Compiler", "ethos-u")
.with_attr("global_symbol", "tvmgen_default_ethosu_main_0")
.with_attr("Primitive", 1)
)
mod = tvm.IRModule()
mod[glb_ethosu] = func
mod = relay.transform.InferType()(mod)
call = relay.Call(glb_ethosu, [ifm])
mod["main"] = relay.Function([ifm], call)
mod = relay.transform.InferType()(mod)
return mod
def generate_weights_data(shape, dtype):
size = 1
for dim in shape:
size *= dim
return (numpy.arange(size) % 255).reshape(shape).astype(dtype)
def get_convolutional_args(call, include_buffers=False, remove_constants=False):
"""A method to extract the arguments from conv2d or depthwise_conv2d extern call."""
args = call.args
conv_args = []
remove_indices = [0]
if remove_constants:
remove_indices += [41, 42, 44, 45]
for i, arg in enumerate(args):
if i in remove_indices:
continue
elif isinstance(arg, tvm.tir.expr.IntImm) or isinstance(arg, tvm.tir.expr.FloatImm):
conv_args.append(arg.value)
elif isinstance(arg, tvm.tir.expr.Load) and not include_buffers:
conv_args.append(arg.index)
else:
conv_args.append(arg)
return conv_args
def compute_ofm_shape(ifm_shape, padding, kernel_shape, strides, dilation=[1, 1]):
assert len(strides) == 2
assert len(dilation) == 2
assert len(kernel_shape) == 2
if padding.lower() == "valid":
h = math.ceil((ifm_shape[1] - (kernel_shape[0] - 1) * dilation[0]) / strides[0])
w = math.ceil((ifm_shape[2] - (kernel_shape[1] - 1) * dilation[1]) / strides[1])
if padding.lower() == "same":
h = math.ceil(ifm_shape[1] / strides[0])
w = math.ceil(ifm_shape[2] / strides[1])
ofm_shape = [ifm_shape[0], h, w, ifm_shape[3]]
return ofm_shape
def compute_padding_shape(ifm_shape, ofm_shape, padding, kernel_shape, strides, dilation=[1, 1]):
assert len(strides) == 2
assert len(dilation) == 2
assert len(kernel_shape) == 2
if padding.lower() == "valid":
return [0, 0, 0, 0]
if padding.lower() == "same":
effective_kernel_shape = [
dilation[0] * (kernel_shape[0] - 1) + 1,
dilation[1] * (kernel_shape[1] - 1) + 1,
]
pad_along_height = max(
(ofm_shape[1] - 1) * strides[0] + effective_kernel_shape[0] - ifm_shape[1], 0
)
pad_along_width = max(
(ofm_shape[2] - 1) * strides[1] + effective_kernel_shape[1] - ifm_shape[2], 0
)
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
return [pad_top, pad_left, pad_bottom, pad_right]
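# Worked example (hypothetical shapes): a 1x8x8x3 NHWC input with a 3x3 kernel,
# stride 2 and SAME padding yields a 4x4 spatial output, and the single missing
# row/column of padding lands on the bottom/right edge.
def _same_padding_example():
    ofm = compute_ofm_shape([1, 8, 8, 3], "SAME", [3, 3], [2, 2])  # [1, 4, 4, 3]
    pads = compute_padding_shape([1, 8, 8, 3], ofm, "SAME", [3, 3], [2, 2])  # [0, 0, 1, 1]
    return ofm, pads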
def make_ethosu_conv2d(
ifm,
ifm_channels,
ofm_channels,
kernel_shape,
padding,
strides,
dilation,
lut=relay.const([], dtype="int8"),
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
weight_dtype="int8",
scale_bias_dtype="uint8",
rounding_mode="TFL",
upscale="NONE",
):
# conv params
weight_shape = (ofm_channels, kernel_shape[0], kernel_shape[1], ifm_channels)
padding = get_pad_tuple(padding, kernel_shape)
scale_bias_data = generate_weights_data((weight_shape[0], 10), scale_bias_dtype)
scale_bias = relay.const(scale_bias_data, dtype=scale_bias_dtype)
weight_data = generate_weights_data(weight_shape, weight_dtype)
weight = relay.const(weight_data, dtype=weight_dtype)
conv = ethosu_ops.ethosu_conv2d(
ifm,
weight,
scale_bias,
lut=lut,
ifm_scale=0.5,
ifm_zero_point=10,
weight_zero_point=12,
ofm_scale=0.25,
ofm_zero_point=14,
kernel_shape=kernel_shape,
ofm_channels=ofm_channels,
strides=strides,
padding=padding,
dilation=dilation,
activation=activation,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
rounding_mode=rounding_mode,
upscale=upscale,
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return conv
def make_ethosu_depthwise_conv2d(
ifm,
channels,
kernel_shape,
padding,
strides,
dilation,
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
weight_dtype="int8",
scale_bias_dtype="uint8",
rounding_mode="TFL",
):
# params
weight_shape = (channels, kernel_shape[0], kernel_shape[1], 1)
padding = get_pad_tuple(padding, kernel_shape)
scale_bias_data = generate_weights_data((weight_shape[0], 10), scale_bias_dtype)
scale_bias = relay.const(scale_bias_data, dtype=scale_bias_dtype)
weight_data = generate_weights_data(weight_shape, weight_dtype)
weight = relay.const(weight_data, dtype=weight_dtype)
depthwise = ethosu_ops.ethosu_depthwise_conv2d(
ifm,
weight,
scale_bias,
lut=relay.const([], dtype="int8"),
ifm_scale=0.6,
ifm_zero_point=11,
weight_zero_point=13,
ofm_scale=0.26,
ofm_zero_point=15,
kernel_shape=kernel_shape,
ofm_channels=channels,
strides=strides,
padding=padding,
dilation=dilation,
activation=activation,
clip_min=15 if activation == "CLIP" else 0,
clip_max=105 if activation == "CLIP" else 0,
rounding_mode=rounding_mode,
upscale="NONE",
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return depthwise
def get_pooling_args(call, include_buffers=False):
args = call.args
pooling_args = []
for i, arg in enumerate(args):
if isinstance(arg, tvm.tir.expr.IntImm) or isinstance(arg, tvm.tir.expr.FloatImm):
pooling_args.append(arg.value)
elif isinstance(arg, tvm.tir.expr.Load) and not include_buffers:
pooling_args.append(arg.index)
else:
pooling_args.append(arg)
return pooling_args
def make_ethosu_pooling(
ifm,
pooling_type,
pool_shape,
ofm_channels,
strides,
padding,
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
rounding_mode="TFL",
upscale="NONE",
):
pooling = ethosu_ops.ethosu_pooling(
ifm,
lut=relay.const([], dtype="int8"),
pooling_type=pooling_type,
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
pool_shape=pool_shape,
ofm_channels=ofm_channels,
strides=strides,
padding=padding,
activation=activation,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
rounding_mode=rounding_mode,
upscale=upscale,
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return pooling
def get_binary_elementwise_args(call, include_buffers=False):
args = call.args
binary_elementwise_args = []
for i, arg in enumerate(args):
if isinstance(arg, tvm.tir.expr.IntImm) or isinstance(arg, tvm.tir.expr.FloatImm):
binary_elementwise_args.append(arg.value)
elif isinstance(arg, tvm.tir.expr.Load) and not include_buffers:
binary_elementwise_args.append(arg.index)
else:
binary_elementwise_args.append(arg)
return binary_elementwise_args
def make_ethosu_binary_elementwise(
ifm,
ifm2,
ifm_channels,
ifm2_channels,
operator_type,
ofm_dtype,
reversed_operands=False,
activation="NONE",
ifm_layout="NHWC",
ifm2_layout="NHWC",
ofm_layout="NHWC",
rounding_mode="TFL",
):
ethosu_binary_elementwise = ethosu_ops.ethosu_binary_elementwise(
ifm=ifm,
ifm2=ifm2,
lut=relay.const([], dtype="int8"),
operator_type=operator_type,
ifm_scale=1,
ifm_zero_point=0,
ifm2_scale=1,
ifm2_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
ifm_channels=ifm_channels,
ifm2_channels=ifm2_channels,
reversed_operands=reversed_operands,
activation=activation,
ofm_dtype=ofm_dtype,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
rounding_mode=rounding_mode,
ifm_layout=ifm_layout,
ifm2_layout=ifm2_layout,
ofm_layout=ofm_layout,
)
return ethosu_binary_elementwise
def make_ethosu_identity(
ifm,
lut=relay.const([], dtype="int8"),
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
activation="NONE",
):
identity = ethosu_ops.ethosu_identity(
ifm,
lut=lut,
ifm_scale=ifm_scale,
ifm_zero_point=ifm_zero_point,
ofm_scale=ofm_scale,
ofm_zero_point=ofm_zero_point,
activation=activation,
)
return identity
def make_ethosu_unary_elementwise(
ifm,
ofm_channels,
operator_type,
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
rounding_mode="TFL",
):
ethosu_unary_elementwise = ethosu_ops.ethosu_unary_elementwise(
ifm=ifm,
lut=relay.const([], dtype="int8"),
operator_type=operator_type,
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
ofm_channels=ofm_channels,
activation=activation,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
rounding_mode=rounding_mode,
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return ethosu_unary_elementwise
| 30.29985 | 108 | 0.653488 |
94f913efc0c68f367c79b42ef46760b594f8d2c2 | 2,802 | py | Python | scripts/kernel_config_parser.py | jbauman42/bob-build | acf70b912e6b0af30a856ac94ad0aae299444419 | ["Apache-2.0"] | 1 | 2021-07-03T23:48:05.000Z | 2021-07-03T23:48:05.000Z | scripts/kernel_config_parser.py | jbauman42/bob-build | acf70b912e6b0af30a856ac94ad0aae299444419 | ["Apache-2.0"] | null | null | null | scripts/kernel_config_parser.py | jbauman42/bob-build | acf70b912e6b0af30a856ac94ad0aae299444419 | ["Apache-2.0"] | null | null | null |
# Copyright 2018-2021 Arm Limited.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
logger = logging.getLogger(__name__)
g_kernel_configs = dict()
def get_config_file_path(kdir):
return os.path.join(kdir, '.config')
def parse_kernel_config(kdir):
"""Parse kernel configuration from provided directory"""
config_file = get_config_file_path(kdir)
config = dict()
try:
with open(config_file, "rt") as fp:
for line in fp.readlines():
try:
(key, val) = line.split("=")
config[key.strip()] = val.strip().strip('"')
except ValueError:
pass
except IOError as e:
logger.error("Failed to open kernel config file in %s:", config_file)
return config
def get_value(kdir, option):
"""Return value of the kernel config opption"""
global g_kernel_configs
if kdir not in g_kernel_configs:
g_kernel_configs[kdir] = parse_kernel_config(kdir)
return g_kernel_configs[kdir].get(option)
def option_enabled(kdir, option):
"""Return true if a given kernel config option is enabled"""
return get_value(kdir, option) == 'y'
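# Usage sketch (the kernel build directory below is a placeholder): callers
# combine get_value()/option_enabled() to branch on the target kernel's
# configuration, e.g. only building modules when loadable module support is on.
def _example_query(kdir="/path/to/kernel/build"):
    return option_enabled(kdir, "CONFIG_MODULES"), get_value(kdir, "CONFIG_LOCALVERSION")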
def get_arch(kdir):
arch_dir = os.path.join(kdir, "arch")
if not os.path.exists(arch_dir):
logger.error("'arch' subdirectory in kernel %s does not exist", kdir)
return None
# Each directory in $KDIR/arch has a config option with the same name.
for arch in os.listdir(arch_dir):
if not os.path.isfile(os.path.join(arch_dir, arch, "Kconfig")):
continue
if option_enabled(kdir, "CONFIG_" + arch.upper()):
return arch
if option_enabled(kdir, "CONFIG_UML"):
return "um"
elif option_enabled(kdir, "CONFIG_X86_32"):
return "i386"
elif option_enabled(kdir, "CONFIG_X86_64"):
return "x86_64"
elif option_enabled(kdir, "CONFIG_PPC32") or option_enabled(kdir, "CONFIG_PPC64"):
return "powerpc"
elif (option_enabled(kdir, "CONFIG_SUPERH") or option_enabled(kdir, "CONFIG_SUPERH32") or
option_enabled(kdir, "CONFIG_SUPERH64")):
return "sh"
logger.error("Couldn't get ARCH for kernel %s", kdir)
return None
| 31.133333 | 93 | 0.66631 |
7a55f86cde27e0aa796cad4bd9f4679339b1e7eb | 40,340 | py | Python | awx/settings/defaults.py | Xiol/awx | 80a17987ff636c6750edf20a67c99e27f99fcccd | ["Apache-2.0"] | null | null | null | awx/settings/defaults.py | Xiol/awx | 80a17987ff636c6750edf20a67c99e27f99fcccd | ["Apache-2.0"] | null | null | null | awx/settings/defaults.py | Xiol/awx | 80a17987ff636c6750edf20a67c99e27f99fcccd | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import os
import re # noqa
import sys
from datetime import timedelta
from celery.schedules import crontab
# global settings
from django.conf import global_settings
# ugettext lazy
from django.utils.translation import ugettext_lazy as _
# Update this module's local settings from the global settings module.
this_module = sys.modules[__name__]
for setting in dir(global_settings):
if setting == setting.upper():
setattr(this_module, setting, getattr(global_settings, setting))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
def is_testing(argv=None):
    """Return True if running django or py.test unit tests."""
if 'PYTEST_CURRENT_TEST' in os.environ.keys():
return True
argv = sys.argv if argv is None else argv
if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
return True
elif len(argv) >= 2 and argv[1] == 'test':
return True
return False
def IS_TESTING(argv=None):
return is_testing(argv)
if "pytest" in sys.modules:
from unittest import mock
with mock.patch('__main__.__builtins__.dir', return_value=[]):
import ldap
else:
import ldap
DEBUG = True
SQL_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'awx.sqlite3'),
'ATOMIC_REQUESTS': True,
'TEST': {
# Test database cannot be :memory: for inventory tests.
'NAME': os.path.join(BASE_DIR, 'awx_test.sqlite3'),
},
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
#
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_TZ = True
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'ui', 'static'),
os.path.join(BASE_DIR, 'static'),
)
# Absolute filesystem path to the directory where static file are collected via
# the collectstatic command.
STATIC_ROOT = os.path.join(BASE_DIR, 'public', 'static')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'public', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
LOGIN_URL = '/api/login/'
# Absolute filesystem path to the directory to host projects (with playbooks).
# This directory should not be web-accessible.
PROJECTS_ROOT = os.path.join(BASE_DIR, 'projects')
# Absolute filesystem path to the directory for job status stdout (default for
# development and tests, default for production defined in production.py). This
# directory should not be web-accessible
JOBOUTPUT_ROOT = os.path.join(BASE_DIR, 'job_output')
# Absolute filesystem path to the directory to store logs
LOG_ROOT = os.path.join(BASE_DIR)
# The heartbeat file for the tower scheduler
SCHEDULE_METADATA_LOCATION = os.path.join(BASE_DIR, '.tower_cycle')
# Django gettext files path: locale/<lang-code>/LC_MESSAGES/django.po, django.mo
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
# Graph of resources that can have named-url
NAMED_URL_GRAPH = {}
# Maximum number of the same job that can be waiting to run when launching from scheduler
# Note: This setting may be overridden by database settings.
SCHEDULE_MAX_JOBS = 10
SITE_ID = 1
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'p7z7g1ql4%6+(6nlebb6hdk7sd^&fnjpal308%n%+p^_e6vo1y'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# HTTP headers and meta keys to search to determine remote host name or IP. Add
# additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a
# reverse proxy.
REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST']
# If Tower is behind a reverse proxy/load balancer, use this setting to
# whitelist the proxy IP addresses from which Tower should trust custom
# REMOTE_HOST_HEADERS header values
# REMOTE_HOST_HEADERS = ['HTTP_X_FORWARDED_FOR', ''REMOTE_ADDR', 'REMOTE_HOST']
# PROXY_IP_WHITELIST = ['10.0.1.100', '10.0.1.101']
# If this setting is an empty list (the default), the headers specified by
# REMOTE_HOST_HEADERS will be trusted unconditionally
PROXY_IP_WHITELIST = []
CUSTOM_VENV_PATHS = []
# Note: This setting may be overridden by database settings.
STDOUT_MAX_BYTES_DISPLAY = 1048576
# Returned in the header on event api lists as a recommendation to the UI
# on how many events to display before truncating/hiding
MAX_UI_JOB_EVENTS = 4000
# Returned in index.html, tells the UI if it should make requests
# to update job data in response to status changes websocket events
UI_LIVE_UPDATES_ENABLED = True
# The maximum size of the ansible callback event's res data structure
# beyond this limit the value will be removed
MAX_EVENT_RES_DATA = 700000
# Note: This setting may be overridden by database settings.
EVENT_STDOUT_MAX_BYTES_DISPLAY = 1024
# The amount of time before a stdout file is expired and removed locally
# Note that this can be recreated if the stdout is downloaded
LOCAL_STDOUT_EXPIRE_TIME = 2592000
# The number of processes spawned by the callback receiver to process job
# events into the database
JOB_EVENT_WORKERS = 4
# The maximum size of the job event worker queue before requests are blocked
JOB_EVENT_MAX_QUEUE_SIZE = 10000
# Disallow sending session cookies over insecure connections
SESSION_COOKIE_SECURE = True
# Seconds before sessions expire.
# Note: This setting may be overridden by database settings.
SESSION_COOKIE_AGE = 1800
# Maximum number of per-user valid, concurrent sessions.
# -1 is unlimited
# Note: This setting may be overridden by database settings.
SESSIONS_PER_USER = -1
CSRF_USE_SESSIONS = False
# Disallow sending csrf cookies over insecure connections
CSRF_COOKIE_SECURE = True
# Limit CSRF cookies to browser sessions
CSRF_COOKIE_AGE = None
TEMPLATES = [
{
'NAME': 'default',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'debug': DEBUG,
'context_processors': [# NOQA
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'awx.ui.context_processors.settings',
'awx.ui.context_processors.version',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
'loaders': [(
'django.template.loaders.cached.Loader',
('django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',),
)],
'builtins': ['awx.main.templatetags.swagger'],
},
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
},
]
ROOT_URLCONF = 'awx.urls'
WSGI_APPLICATION = 'awx.wsgi.application'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'oauth2_provider',
'rest_framework',
'django_extensions',
'channels',
'polymorphic',
'taggit',
'social_django',
'corsheaders',
'awx.conf',
'awx.main',
'awx.api',
'awx.ui',
'awx.sso',
'solo'
]
INTERNAL_IPS = ('127.0.0.1',)
MAX_PAGE_SIZE = 200
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'awx.api.pagination.Pagination',
'PAGE_SIZE': 25,
'DEFAULT_AUTHENTICATION_CLASSES': (
'awx.api.authentication.LoggedOAuth2Authentication',
'awx.api.authentication.SessionAuthentication',
'awx.api.authentication.LoggedBasicAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'awx.api.permissions.ModelAccessPermission',
),
'DEFAULT_FILTER_BACKENDS': (
'awx.api.filters.TypeFilterBackend',
'awx.api.filters.FieldLookupBackend',
'rest_framework.filters.SearchFilter',
'awx.api.filters.OrderByBackend',
),
'DEFAULT_PARSER_CLASSES': (
'awx.api.parsers.JSONParser',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'awx.api.renderers.BrowsableAPIRenderer',
),
'DEFAULT_METADATA_CLASS': 'awx.api.metadata.Metadata',
'EXCEPTION_HANDLER': 'awx.api.views.api_exception_handler',
'VIEW_DESCRIPTION_FUNCTION': 'awx.api.generics.get_view_description',
'NON_FIELD_ERRORS_KEY': '__all__',
'DEFAULT_VERSION': 'v2',
#'URL_FORMAT_OVERRIDE': None,
}
AUTHENTICATION_BACKENDS = (
'awx.sso.backends.LDAPBackend',
'awx.sso.backends.LDAPBackend1',
'awx.sso.backends.LDAPBackend2',
'awx.sso.backends.LDAPBackend3',
'awx.sso.backends.LDAPBackend4',
'awx.sso.backends.LDAPBackend5',
'awx.sso.backends.RADIUSBackend',
'awx.sso.backends.TACACSPlusBackend',
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.github.GithubOAuth2',
'social_core.backends.github.GithubOrganizationOAuth2',
'social_core.backends.github.GithubTeamOAuth2',
'social_core.backends.azuread.AzureADOAuth2',
'awx.sso.backends.SAMLAuth',
'django.contrib.auth.backends.ModelBackend',
)
# Django OAuth Toolkit settings
OAUTH2_PROVIDER_APPLICATION_MODEL = 'main.OAuth2Application'
OAUTH2_PROVIDER_ACCESS_TOKEN_MODEL = 'main.OAuth2AccessToken'
OAUTH2_PROVIDER_REFRESH_TOKEN_MODEL = 'oauth2_provider.RefreshToken'
OAUTH2_PROVIDER = {'ACCESS_TOKEN_EXPIRE_SECONDS': 31536000000,
'AUTHORIZATION_CODE_EXPIRE_SECONDS': 600}
ALLOW_OAUTH2_FOR_EXTERNAL_USERS = False
# LDAP server (default to None to skip using LDAP authentication).
# Note: This setting may be overridden by database settings.
AUTH_LDAP_SERVER_URI = None
# Disable LDAP referrals by default (to prevent certain LDAP queries from
# hanging with AD).
# Note: This setting may be overridden by database settings.
AUTH_LDAP_CONNECTION_OPTIONS = {
ldap.OPT_REFERRALS: 0,
ldap.OPT_NETWORK_TIMEOUT: 30
}
# Radius server settings (default to empty string to skip using Radius auth).
# Note: These settings may be overridden by database settings.
RADIUS_SERVER = ''
RADIUS_PORT = 1812
RADIUS_SECRET = ''
# TACACS+ settings (default host to empty string to skip using TACACS+ auth).
# Note: These settings may be overridden by database settings.
TACACSPLUS_HOST = ''
TACACSPLUS_PORT = 49
TACACSPLUS_SECRET = ''
TACACSPLUS_SESSION_TIMEOUT = 5
TACACSPLUS_AUTH_PROTOCOL = 'ascii'
# Enable / Disable HTTP Basic Authentication used in the API browser
# Note: Session limits are not enforced when using HTTP Basic Authentication.
# Note: This setting may be overridden by database settings.
AUTH_BASIC_ENABLED = True
# If set, serve only minified JS for UI.
USE_MINIFIED_JS = False
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'tower@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Tower] '
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# Default to skipping isolated host key checking (the initial connection will
# hang on an interactive "The authenticity of host example.org can't be
# established" message)
AWX_ISOLATED_HOST_KEY_CHECKING = False
# The number of seconds to sleep between status checks for jobs running on isolated nodes
AWX_ISOLATED_CHECK_INTERVAL = 30
# The timeout (in seconds) for launching jobs on isolated nodes
AWX_ISOLATED_LAUNCH_TIMEOUT = 600
# Ansible connection timeout (in seconds) for communicating with isolated instances
AWX_ISOLATED_CONNECTION_TIMEOUT = 10
# The time (in seconds) between the periodic isolated heartbeat status check
AWX_ISOLATED_PERIODIC_CHECK = 600
# Verbosity level for isolated node management tasks
AWX_ISOLATED_VERBOSITY = 0
# Memcached django cache configuration
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': '127.0.0.1:11211',
# 'TIMEOUT': 864000,
# 'KEY_PREFIX': 'tower_dev',
# }
# }
DEVSERVER_DEFAULT_ADDR = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = '8013'
# Set default ports for live server tests.
os.environ.setdefault('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:9013-9199')
BROKER_DURABILITY = True
BROKER_POOL_LIMIT = None
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
CELERY_DEFAULT_QUEUE = 'awx_private_queue'
CELERYBEAT_SCHEDULE = {
'tower_scheduler': {
'task': 'awx.main.tasks.awx_periodic_scheduler',
'schedule': timedelta(seconds=30),
'options': {'expires': 20,}
},
'admin_checks': {
'task': 'awx.main.tasks.run_administrative_checks',
'schedule': timedelta(days=30)
},
'cluster_heartbeat': {
'task': 'awx.main.tasks.cluster_node_heartbeat',
'schedule': timedelta(seconds=60),
'options': {'expires': 50,}
},
'purge_stdout_files': {
'task': 'awx.main.tasks.purge_old_stdout_files',
'schedule': timedelta(days=7)
},
'gather_analytics': {
'task': 'awx.main.tasks.gather_analytics',
'schedule': crontab(hour=0)
},
'task_manager': {
'task': 'awx.main.scheduler.tasks.run_task_manager',
'schedule': timedelta(seconds=20),
'options': {'expires': 20}
},
# 'isolated_heartbeat': set up at the end of production.py and development.py
}
AWX_INCONSISTENT_TASK_INTERVAL = 60 * 3
AWX_CELERY_QUEUES_STATIC = [
CELERY_DEFAULT_QUEUE,
]
AWX_CELERY_BCAST_QUEUES_STATIC = [
'tower_broadcast_all',
]
ASGI_AMQP = {
'INIT_FUNC': 'awx.prepare_env',
'MODEL': 'awx.main.models.channels.ChannelGroup',
}
# Django Caching Configuration
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'memcached:11211',
},
}
# Social Auth configuration.
SOCIAL_AUTH_STRATEGY = 'social_django.strategy.DjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social_django.models.DjangoStorage'
SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL # noqa
_SOCIAL_AUTH_PIPELINE_BASE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.social_auth.associate_by_email',
'social_core.pipeline.user.create_user',
'awx.sso.pipeline.check_user_found_or_created',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'awx.sso.pipeline.set_is_active_for_new_user',
'social_core.pipeline.user.user_details',
'awx.sso.pipeline.prevent_inactive_login',
)
SOCIAL_AUTH_PIPELINE = _SOCIAL_AUTH_PIPELINE_BASE + (
'awx.sso.pipeline.update_user_orgs',
'awx.sso.pipeline.update_user_teams',
)
SOCIAL_AUTH_SAML_PIPELINE = _SOCIAL_AUTH_PIPELINE_BASE + (
'awx.sso.pipeline.update_user_orgs_by_saml_attr',
'awx.sso.pipeline.update_user_teams_by_saml_attr',
'awx.sso.pipeline.update_user_orgs',
'awx.sso.pipeline.update_user_teams',
)
SOCIAL_AUTH_LOGIN_URL = '/'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/sso/complete/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/sso/error/'
SOCIAL_AUTH_INACTIVE_USER_URL = '/sso/inactive/'
SOCIAL_AUTH_RAISE_EXCEPTIONS = False
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = False
#SOCIAL_AUTH_SLUGIFY_USERNAMES = True
SOCIAL_AUTH_CLEAN_USERNAMES = True
SOCIAL_AUTH_SANITIZE_REDIRECTS = True
SOCIAL_AUTH_REDIRECT_IS_HTTPS = False
# Note: These settings may be overridden by database settings.
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = ''
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = ''
SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['profile']
SOCIAL_AUTH_GITHUB_KEY = ''
SOCIAL_AUTH_GITHUB_SECRET = ''
SOCIAL_AUTH_GITHUB_SCOPE = ['user:email', 'read:org']
SOCIAL_AUTH_GITHUB_ORG_KEY = ''
SOCIAL_AUTH_GITHUB_ORG_SECRET = ''
SOCIAL_AUTH_GITHUB_ORG_NAME = ''
SOCIAL_AUTH_GITHUB_ORG_SCOPE = ['user:email', 'read:org']
SOCIAL_AUTH_GITHUB_TEAM_KEY = ''
SOCIAL_AUTH_GITHUB_TEAM_SECRET = ''
SOCIAL_AUTH_GITHUB_TEAM_ID = ''
SOCIAL_AUTH_GITHUB_TEAM_SCOPE = ['user:email', 'read:org']
SOCIAL_AUTH_AZUREAD_OAUTH2_KEY = ''
SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET = ''
SOCIAL_AUTH_SAML_SP_ENTITY_ID = ''
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = ''
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = ''
SOCIAL_AUTH_SAML_ORG_INFO = {}
SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {}
SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {}
SOCIAL_AUTH_SAML_ENABLED_IDPS = {}
SOCIAL_AUTH_SAML_ORGANIZATION_ATTR = {}
SOCIAL_AUTH_SAML_TEAM_ATTR = {}
# Any ANSIBLE_* settings will be passed to the task runner subprocess
# environment
# Do not want AWX to ask interactive questions and want it to be friendly with
# reprovisioning
ANSIBLE_HOST_KEY_CHECKING = False
# RHEL ships an SSH that is too old, so Ansible will select paramiko, and this is VERY slow.
ANSIBLE_PARAMIKO_RECORD_HOST_KEYS = False
# Force ansible in color even if we don't have a TTY so we can properly colorize
# output
ANSIBLE_FORCE_COLOR = True
# If tmp generated inventory parsing fails (error state), fail playbook fast
ANSIBLE_INVENTORY_UNPARSED_FAILED = True
# Additional environment variables to be passed to the ansible subprocesses
AWX_TASK_ENV = {}
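# For example (hypothetical values), a settings override could route outbound
# calls made by jobs through a proxy:
# AWX_TASK_ENV = {'HTTP_PROXY': 'http://proxy.example.com:3128'}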
# Flag to enable/disable updating hosts M2M when saving job events.
CAPTURE_JOB_EVENT_HOSTS = False
# Rebuild Host Smart Inventory memberships.
AWX_REBUILD_SMART_MEMBERSHIP = False
# By default, allow arbitrary Jinja templating in extra_vars defined on a Job Template
ALLOW_JINJA_IN_EXTRA_VARS = 'template'
# Enable dynamically pulling roles from a requirement.yml file
# when updating SCM projects
# Note: This setting may be overridden by database settings.
AWX_ROLES_ENABLED = True
# Enable dynamically pulling collections from a requirement.yml file
# when updating SCM projects
# Note: This setting may be overridden by database settings.
AWX_COLLECTIONS_ENABLED = True
# Enable bubblewrap support for running jobs (playbook runs only).
# Note: This setting may be overridden by database settings.
AWX_PROOT_ENABLED = True
# Command/path to bubblewrap.
AWX_PROOT_CMD = 'bwrap'
# Additional paths to hide from jobs using bubblewrap.
# Note: This setting may be overridden by database settings.
AWX_PROOT_HIDE_PATHS = []
# Additional paths to show for jobs using bubblewrap.
# Note: This setting may be overridden by database settings.
AWX_PROOT_SHOW_PATHS = []
# The directory in which Tower will create new temporary directories for job
# execution and isolation (such as credential files and custom
# inventory scripts).
# Note: This setting may be overridden by database settings.
AWX_PROOT_BASE_PATH = "/tmp"
# User definable ansible callback plugins
# Note: This setting may be overridden by database settings.
AWX_ANSIBLE_CALLBACK_PLUGINS = ""
# Automatically remove nodes that have missed their heartbeats after some time
AWX_AUTO_DEPROVISION_INSTANCES = False
# Enable Pendo on the UI, possible values are 'off', 'anonymous', and 'detailed'
# Note: This setting may be overridden by database settings.
PENDO_TRACKING_STATE = "off"
# Enables Insights data collection for Ansible Tower.
# Note: This setting may be overridden by database settings.
INSIGHTS_TRACKING_STATE = False
# Default list of modules allowed for ad hoc commands.
# Note: This setting may be overridden by database settings.
AD_HOC_COMMANDS = [
'command',
'shell',
'yum',
'apt',
'apt_key',
'apt_repository',
'apt_rpm',
'service',
'group',
'user',
'mount',
'ping',
'selinux',
'setup',
'win_ping',
'win_service',
'win_updates',
'win_group',
'win_user',
]
INV_ENV_VARIABLE_BLACKLIST = ("HOME", "USER", "_", "TERM")
# ----------------
# -- Amazon EC2 --
# ----------------
# AWS does not appear to provide pretty region names via any API, so store the
# list of names here. The available region IDs will be pulled from boto.
# http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region
EC2_REGION_NAMES = {
'us-east-1': _('US East (Northern Virginia)'),
'us-east-2': _('US East (Ohio)'),
'us-west-2': _('US West (Oregon)'),
'us-west-1': _('US West (Northern California)'),
'ca-central-1': _('Canada (Central)'),
'eu-central-1': _('EU (Frankfurt)'),
'eu-west-1': _('EU (Ireland)'),
'eu-west-2': _('EU (London)'),
'ap-southeast-1': _('Asia Pacific (Singapore)'),
'ap-southeast-2': _('Asia Pacific (Sydney)'),
'ap-northeast-1': _('Asia Pacific (Tokyo)'),
'ap-northeast-2': _('Asia Pacific (Seoul)'),
'ap-south-1': _('Asia Pacific (Mumbai)'),
'sa-east-1': _('South America (Sao Paulo)'),
'us-gov-west-1': _('US West (GovCloud)'),
'cn-north-1': _('China (Beijing)'),
}
EC2_REGIONS_BLACKLIST = [
'us-gov-west-1',
'cn-north-1',
]
# Inventory variable name/values for determining if host is active/enabled.
EC2_ENABLED_VAR = 'ec2_state'
EC2_ENABLED_VALUE = 'running'
# Inventory variable name containing unique instance ID.
EC2_INSTANCE_ID_VAR = 'ec2_id'
# Filter for allowed group/host names when importing inventory from EC2.
EC2_GROUP_FILTER = r'^.+$'
EC2_HOST_FILTER = r'^.+$'
EC2_EXCLUDE_EMPTY_GROUPS = True
# ------------
# -- VMware --
# ------------
VMWARE_REGIONS_BLACKLIST = []
# Inventory variable name/values for determining whether a host is
# active in vSphere.
VMWARE_ENABLED_VAR = 'guest.gueststate'
VMWARE_ENABLED_VALUE = 'running'
# Inventory variable name containing the unique instance ID.
VMWARE_INSTANCE_ID_VAR = 'config.instanceuuid'
# Filter for allowed group and host names when importing inventory
# from VMware.
VMWARE_GROUP_FILTER = r'^.+$'
VMWARE_HOST_FILTER = r'^.+$'
VMWARE_EXCLUDE_EMPTY_GROUPS = True
VMWARE_VALIDATE_CERTS = False
# ---------------------------
# -- Google Compute Engine --
# ---------------------------
# It's not possible to get zones in GCE without authenticating, so we
# provide a list here.
# Source: https://developers.google.com/compute/docs/zones
GCE_REGION_CHOICES = [
('us-east1-b', _('US East 1 (B)')),
('us-east1-c', _('US East 1 (C)')),
('us-east1-d', _('US East 1 (D)')),
('us-east4-a', _('US East 4 (A)')),
('us-east4-b', _('US East 4 (B)')),
('us-east4-c', _('US East 4 (C)')),
('us-central1-a', _('US Central (A)')),
('us-central1-b', _('US Central (B)')),
('us-central1-c', _('US Central (C)')),
('us-central1-f', _('US Central (F)')),
('us-west1-a', _('US West (A)')),
('us-west1-b', _('US West (B)')),
('us-west1-c', _('US West (C)')),
('europe-west1-b', _('Europe West 1 (B)')),
('europe-west1-c', _('Europe West 1 (C)')),
('europe-west1-d', _('Europe West 1 (D)')),
('europe-west2-a', _('Europe West 2 (A)')),
('europe-west2-b', _('Europe West 2 (B)')),
('europe-west2-c', _('Europe West 2 (C)')),
('asia-east1-a', _('Asia East (A)')),
('asia-east1-b', _('Asia East (B)')),
('asia-east1-c', _('Asia East (C)')),
('asia-southeast1-a', _('Asia Southeast (A)')),
('asia-southeast1-b', _('Asia Southeast (B)')),
('asia-northeast1-a', _('Asia Northeast (A)')),
('asia-northeast1-b', _('Asia Northeast (B)')),
('asia-northeast1-c', _('Asia Northeast (C)')),
('australia-southeast1-a', _('Australia Southeast (A)')),
('australia-southeast1-b', _('Australia Southeast (B)')),
('australia-southeast1-c', _('Australia Southeast (C)')),
]
GCE_REGIONS_BLACKLIST = []
# Inventory variable name/value for determining whether a host is active
# in Google Compute Engine.
GCE_ENABLED_VAR = 'status'
GCE_ENABLED_VALUE = 'running'
# Filter for allowed group and host names when importing inventory from
# Google Compute Engine.
GCE_GROUP_FILTER = r'^.+$'
GCE_HOST_FILTER = r'^.+$'
GCE_EXCLUDE_EMPTY_GROUPS = True
GCE_INSTANCE_ID_VAR = 'gce_id'
# --------------------------------------
# -- Microsoft Azure Resource Manager --
# --------------------------------------
# It's not possible to get zones in Azure without authenticating, so we
# provide a list here.
AZURE_RM_REGION_CHOICES = [
('eastus', _('US East')),
('eastus2', _('US East 2')),
('centralus', _('US Central')),
('northcentralus', _('US North Central')),
('southcentralus', _('US South Central')),
('westcentralus', _('US West Central')),
('westus', _('US West')),
('westus2', _('US West 2')),
('canadaeast', _('Canada East')),
('canadacentral', _('Canada Central')),
('brazilsouth', _('Brazil South')),
('northeurope', _('Europe North')),
('westeurope', _('Europe West')),
('ukwest', _('UK West')),
('uksouth', _('UK South')),
('eastasia', _('Asia East')),
('southestasia', _('Asia Southeast')),
('australiaeast', _('Australia East')),
('australiasoutheast', _('Australia Southeast')),
('westindia', _('India West')),
('southindia', _('India South')),
('japaneast', _('Japan East')),
('japanwest', _('Japan West')),
('koreacentral', _('Korea Central')),
('koreasouth', _('Korea South')),
]
AZURE_RM_REGIONS_BLACKLIST = []
AZURE_RM_GROUP_FILTER = r'^.+$'
AZURE_RM_HOST_FILTER = r'^.+$'
AZURE_RM_ENABLED_VAR = 'powerstate'
AZURE_RM_ENABLED_VALUE = 'running'
AZURE_RM_INSTANCE_ID_VAR = 'id'
AZURE_RM_EXCLUDE_EMPTY_GROUPS = True
# ---------------------
# ----- OpenStack -----
# ---------------------
OPENSTACK_ENABLED_VAR = 'status'
OPENSTACK_ENABLED_VALUE = 'ACTIVE'
OPENSTACK_GROUP_FILTER = r'^.+$'
OPENSTACK_HOST_FILTER = r'^.+$'
OPENSTACK_EXCLUDE_EMPTY_GROUPS = True
OPENSTACK_INSTANCE_ID_VAR = 'openstack.id'
# ---------------------
# ----- oVirt4 -----
# ---------------------
RHV_ENABLED_VAR = 'status'
RHV_ENABLED_VALUE = 'up'
RHV_GROUP_FILTER = r'^.+$'
RHV_HOST_FILTER = r'^.+$'
RHV_EXCLUDE_EMPTY_GROUPS = True
RHV_INSTANCE_ID_VAR = 'id'
# ---------------------
# ----- Tower -----
# ---------------------
TOWER_ENABLED_VAR = 'remote_tower_enabled'
TOWER_ENABLED_VALUE = 'true'
TOWER_GROUP_FILTER = r'^.+$'
TOWER_HOST_FILTER = r'^.+$'
TOWER_EXCLUDE_EMPTY_GROUPS = True
TOWER_INSTANCE_ID_VAR = 'remote_tower_id'
# ---------------------
# ----- Foreman -----
# ---------------------
SATELLITE6_ENABLED_VAR = 'foreman.enabled'
SATELLITE6_ENABLED_VALUE = 'True'
SATELLITE6_GROUP_FILTER = r'^.+$'
SATELLITE6_HOST_FILTER = r'^.+$'
SATELLITE6_EXCLUDE_EMPTY_GROUPS = True
SATELLITE6_INSTANCE_ID_VAR = 'foreman.id'
# SATELLITE6_GROUP_PREFIX and SATELLITE6_GROUP_PATTERNS defined in source vars
# ---------------------
# ----- CloudForms -----
# ---------------------
CLOUDFORMS_ENABLED_VAR = 'cloudforms.power_state'
CLOUDFORMS_ENABLED_VALUE = 'on'
CLOUDFORMS_GROUP_FILTER = r'^.+$'
CLOUDFORMS_HOST_FILTER = r'^.+$'
CLOUDFORMS_EXCLUDE_EMPTY_GROUPS = True
CLOUDFORMS_INSTANCE_ID_VAR = 'cloudforms.id'
# ---------------------
# ----- Custom -----
# ---------------------
#CUSTOM_ENABLED_VAR =
#CUSTOM_ENABLED_VALUE =
CUSTOM_GROUP_FILTER = r'^.+$'
CUSTOM_HOST_FILTER = r'^.+$'
CUSTOM_EXCLUDE_EMPTY_GROUPS = False
#CUSTOM_INSTANCE_ID_VAR =
# ---------------------
# ----- SCM -----
# ---------------------
#SCM_ENABLED_VAR =
#SCM_ENABLED_VALUE =
SCM_GROUP_FILTER = r'^.+$'
SCM_HOST_FILTER = r'^.+$'
SCM_EXCLUDE_EMPTY_GROUPS = False
#SCM_INSTANCE_ID_VAR =
# ---------------------
# -- Activity Stream --
# ---------------------
# Defaults for enabling/disabling activity stream.
# Note: These settings may be overridden by database settings.
ACTIVITY_STREAM_ENABLED = True
ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC = False
# Internal API URL for use by inventory scripts and callback plugin.
INTERNAL_API_URL = 'http://127.0.0.1:%s' % DEVSERVER_DEFAULT_PORT
PERSISTENT_CALLBACK_MESSAGES = True
USE_CALLBACK_QUEUE = True
CALLBACK_QUEUE = "callback_tasks"
SCHEDULER_QUEUE = "scheduler"
TASK_COMMAND_PORT = 6559
SOCKETIO_NOTIFICATION_PORT = 6557
SOCKETIO_LISTEN_PORT = 8080
FACT_CACHE_PORT = 6564
# Note: This setting may be overridden by database settings.
ORG_ADMINS_CAN_SEE_ALL_USERS = True
MANAGE_ORGANIZATION_AUTH = True
# Note: This setting may be overridden by database settings.
TOWER_ADMIN_ALERTS = True
# Note: This setting may be overridden by database settings.
TOWER_URL_BASE = "https://towerhost"
INSIGHTS_URL_BASE = "https://example.org"
INSIGHTS_AGENT_MIME = 'application/example'
TOWER_SETTINGS_MANIFEST = {}
# Settings related to external logger configuration
LOG_AGGREGATOR_ENABLED = False
LOG_AGGREGATOR_TCP_TIMEOUT = 5
LOG_AGGREGATOR_VERIFY_CERT = True
LOG_AGGREGATOR_LEVEL = 'INFO'
# The number of retry attempts for websocket session establishment
# If you're encountering issues establishing websockets in clustered Tower,
# raising this value can help
CHANNEL_LAYER_RECEIVE_MAX_RETRY = 10
# Logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'require_debug_true_or_test': {
'()': 'awx.main.utils.RequireDebugTrueOrTest',
},
'external_log_enabled': {
'()': 'awx.main.utils.filters.ExternalLoggerEnabled'
},
'dynamic_level_filter': {
'()': 'awx.main.utils.filters.DynamicLevelFilter'
}
},
'formatters': {
'simple': {
'format': '%(asctime)s %(levelname)-8s %(name)s %(message)s',
},
'json': {
'()': 'awx.main.utils.formatters.LogstashFormatter'
},
'timed_import': {
'()': 'awx.main.utils.formatters.TimeFormatter',
'format': '%(relativeSeconds)9.3f %(levelname)-8s %(message)s'
},
'dispatcher': {
'format': '%(asctime)s %(levelname)-8s %(name)s PID:%(process)d %(message)s',
},
},
'handlers': {
'console': {
'()': 'logging.StreamHandler',
'level': 'DEBUG',
'filters': ['require_debug_true_or_test'],
'formatter': 'simple',
},
'null': {
'class': 'logging.NullHandler',
},
'file': {
'class': 'logging.NullHandler',
'formatter': 'simple',
},
'syslog': {
'level': 'WARNING',
'filters': ['require_debug_false'],
'class': 'logging.NullHandler',
'formatter': 'simple',
},
'external_logger': {
'class': 'awx.main.utils.handlers.AWXProxyHandler',
'formatter': 'json',
'filters': ['external_log_enabled', 'dynamic_level_filter'],
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
},
'tower_warnings': {
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false', 'dynamic_level_filter'],
'filename': os.path.join(LOG_ROOT, 'tower.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'simple',
},
'callback_receiver': {
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false', 'dynamic_level_filter'],
'filename': os.path.join(LOG_ROOT, 'callback_receiver.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'simple',
},
'dispatcher': {
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false', 'dynamic_level_filter'],
'filename': os.path.join(LOG_ROOT, 'dispatcher.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'dispatcher',
},
'celery.beat': {
'class':'logging.StreamHandler',
'level': 'ERROR'
}, # don't log every celerybeat wakeup
'inventory_import': {
'level': 'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'timed_import',
},
'task_system': {
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false', 'dynamic_level_filter'],
'filename': os.path.join(LOG_ROOT, 'task_system.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'simple',
},
'management_playbooks': {
'level': 'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(LOG_ROOT, 'management_playbooks.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'simple',
},
'system_tracking_migrations': {
'level': 'WARNING',
'class':'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(LOG_ROOT, 'tower_system_tracking_migrations.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
            'formatter': 'simple',
},
'rbac_migrations': {
'level': 'WARNING',
            'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(LOG_ROOT, 'tower_rbac_migrations.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
            'formatter': 'simple',
},
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'WARNING',
},
'kombu': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'WARNING',
},
'rest_framework.request': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'WARNING',
'propagate': False,
},
'py.warnings': {
'handlers': ['console'],
},
'awx': {
'handlers': ['console', 'file', 'tower_warnings', 'external_logger'],
'level': 'DEBUG',
},
'awx.conf': {
'handlers': ['null'],
'level': 'WARNING',
},
'awx.conf.settings': {
'handlers': ['null'],
'level': 'WARNING',
},
'awx.main': {
'handlers': ['null']
},
'awx.main.commands.run_callback_receiver': {
'handlers': ['callback_receiver'],
'level': 'INFO' # in debug mode, includes full callback data
},
'awx.main.dispatch': {
'handlers': ['dispatcher'],
},
'awx.isolated.manager.playbooks': {
'handlers': ['management_playbooks'],
'propagate': False
},
'awx.main.commands.inventory_import': {
'handlers': ['inventory_import'],
'propagate': False
},
'awx.main.tasks': {
'handlers': ['task_system', 'external_logger'],
'propagate': False
},
'awx.main.scheduler': {
'handlers': ['task_system', 'external_logger'],
'propagate': False
},
'awx.main.access': {
'level': 'INFO', # very verbose debug-level logs
},
'awx.main.signals': {
'level': 'INFO', # very verbose debug-level logs
},
'awx.api.permissions': {
'level': 'INFO', # very verbose debug-level logs
},
'awx.analytics': {
'handlers': ['external_logger'],
'level': 'INFO',
'propagate': False
},
'django_auth_ldap': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'DEBUG',
},
'social': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'DEBUG',
},
'system_tracking_migrations': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'DEBUG',
},
'rbac_migrations': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'DEBUG',
},
}
}
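# Example (illustrative only, not part of the shipped settings): application
# code obtains one of the loggers configured above by name, e.g.
#
#     import logging
#     logger = logging.getLogger('awx.main.tasks')
#     logger.info('job finished')  # handled by task_system + external_logger
#
# Loggers whose 'propagate' flag is left at the default True hand records up
# to the closest configured ancestor (ultimately the 'awx' logger above).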
# Apply coloring to messages logged to the console
COLOR_LOGS = False
# https://github.com/django-polymorphic/django-polymorphic/issues/195
# FIXME: Disabling models.E006 warning until we can renamed Project and InventorySource
SILENCED_SYSTEM_CHECKS = ['models.E006']
# Use middleware to get request statistics
AWX_REQUEST_PROFILE = False
# Delete temporary directories created to store playbook run-time
AWX_CLEANUP_PATHS = True
MIDDLEWARE = [
'awx.main.middleware.TimingMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'awx.main.middleware.MigrationRanCheckMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'awx.main.middleware.ActivityStreamMiddleware',
'awx.sso.middleware.SocialAuthMiddleware',
'crum.CurrentRequestUserMiddleware',
'awx.main.middleware.URLModificationMiddleware',
'awx.main.middleware.SessionTimeoutMiddleware',
]
| 33.147083 | 89 | 0.666832 |
ac8b7b1ce6fd4cb2826df13af5f8c85b5c4169a1 | 173,371 | py | Python | nova/tests/virt/xenapi/test_xenapi.py | bopopescu/nested_quota | 6d8443287e29c2c9e03cd4e5c5757424314280ad | [
"Apache-2.0"
] | 1 | 2015-11-25T10:18:22.000Z | 2015-11-25T10:18:22.000Z | nova/tests/virt/xenapi/test_xenapi.py | bopopescu/nested_quota | 6d8443287e29c2c9e03cd4e5c5757424314280ad | [
"Apache-2.0"
] | 9 | 2015-05-20T11:20:17.000Z | 2017-07-27T08:21:33.000Z | nova/tests/virt/xenapi/test_xenapi.py | bopopescu/nested_quota | 6d8443287e29c2c9e03cd4e5c5757424314280ad | [
"Apache-2.0"
] | 13 | 2015-05-05T09:34:04.000Z | 2017-11-08T02:03:46.000Z | # Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for XenAPI."""
import ast
import base64
import contextlib
import copy
import functools
import os
import re
import mock
import mox
from oslo.config import cfg
from nova.compute import api as compute_api
from nova.compute import arch
from nova.compute import flavors
from nova.compute import hvtype
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_instance
from nova.tests import fake_network
from nova.tests import fake_processutils
import nova.tests.image.fake as fake_image
from nova.tests import matchers
from nova.tests.objects import test_aggregate
from nova.tests.virt.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import host
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('default_availability_zone', 'nova.availability_zones')
CONF.import_opt('login_timeout', 'nova.virt.xenapi.client.session',
group="xenserver")
IMAGE_MACHINE = '1'
IMAGE_KERNEL = '2'
IMAGE_RAMDISK = '3'
IMAGE_RAW = '4'
IMAGE_VHD = '5'
IMAGE_ISO = '6'
IMAGE_IPXE_ISO = '7'
IMAGE_FROM_VOLUME = '8'
IMAGE_FIXTURES = {
IMAGE_MACHINE: {
'image_meta': {'name': 'fakemachine', 'size': 0,
'disk_format': 'ami',
'container_format': 'ami'},
},
IMAGE_KERNEL: {
'image_meta': {'name': 'fakekernel', 'size': 0,
'disk_format': 'aki',
'container_format': 'aki'},
},
IMAGE_RAMDISK: {
'image_meta': {'name': 'fakeramdisk', 'size': 0,
'disk_format': 'ari',
'container_format': 'ari'},
},
IMAGE_RAW: {
'image_meta': {'name': 'fakeraw', 'size': 0,
'disk_format': 'raw',
'container_format': 'bare'},
},
IMAGE_VHD: {
'image_meta': {'name': 'fakevhd', 'size': 0,
'disk_format': 'vhd',
'container_format': 'ovf'},
},
IMAGE_ISO: {
'image_meta': {'name': 'fakeiso', 'size': 0,
'disk_format': 'iso',
'container_format': 'bare'},
},
IMAGE_IPXE_ISO: {
'image_meta': {'name': 'fake_ipxe_iso', 'size': 0,
'disk_format': 'iso',
'container_format': 'bare',
'properties': {'ipxe_boot': 'true'}},
},
IMAGE_FROM_VOLUME: {
'image_meta': {'name': 'fake_ipxe_iso',
'properties': {'foo': 'bar'}},
},
}
def get_session():
return xenapi_session.XenAPISession('test_url', 'root', 'test_pass')
def set_image_fixtures():
image_service = fake_image.FakeImageService()
image_service.images.clear()
for image_id, image_meta in IMAGE_FIXTURES.items():
image_meta = image_meta['image_meta']
image_meta['id'] = image_id
image_service.create(None, image_meta)
def get_fake_device_info():
# FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid
# can be removed from the dict when LP bug #1087308 is fixed
fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
fake = {'block_device_mapping':
[{'connection_info': {'driver_volume_type': 'iscsi',
'data': {'sr_uuid': 'falseSR',
'introduce_sr_keys': ['sr_type'],
'sr_type': 'iscsi',
'vdi_uuid': fake_vdi_uuid,
'target_discovered': False,
'target_iqn': 'foo_iqn:foo_volid',
'target_portal': 'localhost:3260',
'volume_id': 'foo_volid',
'target_lun': 1,
'auth_password': 'my-p@55w0rd',
'auth_username': 'johndoe',
'auth_method': u'CHAP'}, },
'mount_device': 'vda',
'delete_on_termination': False}, ],
'root_device_name': '/dev/sda',
'ephemerals': [],
'swap': None, }
return fake
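# A minimal sketch (hypothetical, mirroring the boot-from-volume tests further
# below) of how the helper above is consumed inside a test method:
#
#     dev_info = get_fake_device_info()
#     self._test_spawn(None, None, None, block_device_info=dev_info)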
def stub_vm_utils_with_vdi_attached_here(function):
"""vm_utils.with_vdi_attached_here needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
@contextlib.contextmanager
def fake_vdi_attached_here(*args, **kwargs):
fake_dev = 'fakedev'
yield fake_dev
def fake_image_download(*args, **kwargs):
pass
orig_vdi_attached_here = vm_utils.vdi_attached_here
orig_image_download = fake_image._FakeImageService.download
try:
vm_utils.vdi_attached_here = fake_vdi_attached_here
fake_image._FakeImageService.download = fake_image_download
return function(self, *args, **kwargs)
finally:
fake_image._FakeImageService.download = orig_image_download
vm_utils.vdi_attached_here = orig_vdi_attached_here
return decorated_function
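# Illustrative usage sketch (not part of the original module): a test method
# whose code path calls vm_utils.vdi_attached_here can be wrapped with the
# decorator above so that the context manager yields the fake device
# 'fakedev' instead of attaching a real VDI. The method name is hypothetical:
#
#     @stub_vm_utils_with_vdi_attached_here
#     def test_something_using_an_attached_vdi(self):
#         ...  # vm_utils.vdi_attached_here now yields 'fakedev'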
def get_create_system_metadata(context, instance_type_id):
flavor = db.flavor_get(context, instance_type_id)
return flavors.save_flavor_info({}, flavor)
def create_instance_with_system_metadata(context, instance_values):
instance_values['system_metadata'] = get_create_system_metadata(
context, instance_values['instance_type_id'])
return db.instance_create(context, instance_values)
class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.flags(disable_process_locking=True,
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.instance = fake_instance.fake_db_instance(name='foo')
@classmethod
def _make_connection_info(cls):
target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
return {'driver_volume_type': 'iscsi',
'data': {'volume_id': 1,
'target_iqn': target_iqn,
'target_portal': '127.0.0.1:3260,fake',
'target_lun': None,
'auth_method': 'CHAP',
'auth_username': 'username',
'auth_password': 'password'}}
def test_attach_volume(self):
# This shows how to test Ops classes' methods.
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
conn_info = self._make_connection_info()
self.assertIsNone(
conn.attach_volume(None, conn_info, self.instance, '/dev/sdc'))
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vm)
def test_attach_volume_raise_exception(self):
# This shows how to test when exceptions are raised.
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(self.instance['name'], 'Running')
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
None, {'driver_volume_type': 'nonexist'},
self.instance, '/dev/sdc')
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIVMTestCase(stubs.XenAPITestBase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.network = importutils.import_object(CONF.network_manager)
self.flags(disable_process_locking=True,
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', 'fake_br1')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stub_out_vm_methods(self.stubs)
fake_processutils.stub_out_processutils_execute(self.stubs)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn._session.is_local_connection = False
fake_image.stub_out_image_service(self.stubs)
set_image_fixtures()
stubs.stubout_image_service_download(self.stubs)
stubs.stubout_stream_disk(self.stubs)
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
name_label = "fakenamelabel"
disk_type = "fakedisktype"
virtual_size = 777
return vm_utils.create_vdi(
session, sr_ref, instance, name_label, disk_type,
virtual_size)
self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
def tearDown(self):
fake_image.FakeImageService_reset()
super(XenAPIVMTestCase, self).tearDown()
def test_init_host(self):
session = get_session()
vm = vm_utils._get_this_vm_ref(session)
# Local root disk
vdi0 = xenapi_fake.create_vdi('compute', None)
vbd0 = xenapi_fake.create_vbd(vm, vdi0)
# Instance VDI
vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
other_config={'nova_instance_uuid': 'aaaa'})
xenapi_fake.create_vbd(vm, vdi1)
# Only looks like instance VDI
vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
vbd2 = xenapi_fake.create_vbd(vm, vdi2)
self.conn.init_host(None)
self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
def test_instance_exists(self):
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(mox.IgnoreArg(), 'foo').AndReturn(True)
self.mox.ReplayAll()
self.stubs.Set(objects.Instance, 'name', 'foo')
instance = objects.Instance(uuid='fake-uuid')
self.assertTrue(self.conn.instance_exists(instance))
def test_instance_not_exists(self):
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(mox.IgnoreArg(), 'bar').AndReturn(None)
self.mox.ReplayAll()
self.stubs.Set(objects.Instance, 'name', 'bar')
instance = objects.Instance(uuid='fake-uuid')
self.assertFalse(self.conn.instance_exists(instance))
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEqual(instances, [])
def test_list_instance_uuids_0(self):
instance_uuids = self.conn.list_instance_uuids()
self.assertEqual(instance_uuids, [])
def test_list_instance_uuids(self):
uuids = []
for x in xrange(1, 4):
instance = self._create_instance(x)
uuids.append(instance['uuid'])
instance_uuids = self.conn.list_instance_uuids()
self.assertEqual(len(uuids), len(instance_uuids))
self.assertEqual(set(uuids), set(instance_uuids))
def test_get_rrd_server(self):
self.flags(connection_url='myscheme://myaddress/',
group='xenserver')
server_info = vm_utils._get_rrd_server()
self.assertEqual(server_info[0], 'myscheme')
self.assertEqual(server_info[1], 'myaddress')
expected_raw_diagnostics = {
'vbd_xvdb_write': '0.0',
'memory_target': '4294967296.0000',
'memory_internal_free': '1415564.0000',
'memory': '4294967296.0000',
'vbd_xvda_write': '0.0',
'cpu0': '0.0042',
'vif_0_tx': '287.4134',
'vbd_xvda_read': '0.0',
'vif_0_rx': '1816.0144',
'vif_2_rx': '0.0',
'vif_2_tx': '0.0',
'vbd_xvdb_read': '0.0',
'last_update': '1328795567',
}
def test_get_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, 'vm_rrd.xml')) as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
expected = self.expected_raw_diagnostics
instance = self._create_instance()
actual = self.conn.get_diagnostics(instance)
self.assertThat(actual, matchers.DictMatches(expected))
def test_get_instance_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, 'vm_rrd.xml')) as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
expected = {
'config_drive': False,
'state': 'running',
'driver': 'xenapi',
'version': '1.0',
'uptime': 0,
'hypervisor_os': None,
'cpu_details': [{'time': 0}, {'time': 0},
{'time': 0}, {'time': 0}],
'nic_details': [{'mac_address': '00:00:00:00:00:00',
'rx_drop': 0,
'rx_errors': 0,
'rx_octets': 0,
'rx_packets': 0,
'tx_drop': 0,
'tx_errors': 0,
'tx_octets': 0,
'tx_packets': 0}],
'disk_details': [{'errors_count': 0,
'id': '',
'read_bytes': 0,
'read_requests': 0,
'write_bytes': 0,
'write_requests': 0}],
'memory_details': {'maximum': 8192, 'used': 0}}
instance = self._create_instance()
actual = self.conn.get_instance_diagnostics(instance)
self.assertEqual(expected, actual.serialize())
def test_get_vnc_console(self):
instance = self._create_instance(obj=True)
session = get_session()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = vm_utils.lookup(session, instance['name'])
console = conn.get_vnc_console(self.context, instance)
        # Note(sulo): We don't care about the session id in tests;
        # it will always differ, so strip it out.
actual_path = console.internal_access_path.split('&')[0]
expected_path = "/console?ref=%s" % str(vm_ref)
self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_for_rescue(self):
instance = self._create_instance(obj=True)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
'Running')
# Set instance state to rescued
instance['vm_state'] = 'rescued'
console = conn.get_vnc_console(self.context, instance)
        # Note(sulo): We don't care about the session id in tests;
        # it will always differ, so strip it out.
actual_path = console.internal_access_path.split('&')[0]
expected_path = "/console?ref=%s" % str(rescue_vm)
self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_instance_not_ready(self):
instance = self._create_instance(obj=True, spawn=False)
instance.vm_state = 'building'
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InstanceNotFound,
conn.get_vnc_console, self.context, instance)
def test_get_vnc_console_rescue_not_ready(self):
instance = self._create_instance(obj=True, spawn=False)
instance.vm_state = 'rescued'
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InstanceNotReady,
conn.get_vnc_console, self.context, instance)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=False,
osvol=False):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
        # Stub out the firewall driver, since the previously installed stubs
        # alter XML-RPC result parsing.
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
image_id = "my_snapshot_id"
self.assertRaises(exception.NovaException, self.conn.snapshot,
self.context, instance, image_id,
lambda *args, **kwargs: None)
def test_instance_snapshot(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
image_id = "my_snapshot_id"
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
        # Stub out the firewall driver, since the previously installed stubs
        # alter XML-RPC result parsing.
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
self.fake_upload_called = False
def fake_image_upload(_self, ctx, session, inst, vdi_uuids,
img_id):
self.fake_upload_called = True
self.assertEqual(ctx, self.context)
self.assertEqual(inst, instance)
self.assertIsInstance(vdi_uuids, list)
self.assertEqual(img_id, image_id)
self.stubs.Set(glance.GlanceStore, 'upload_image',
fake_image_upload)
self.conn.snapshot(self.context, instance, image_id,
func_call_matcher.call)
# Ensure VM was torn down
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEqual(vm_labels, [instance['name']])
# Ensure VBDs were torn down
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEqual(vbd_labels, [instance['name']])
# Ensure task states changed in correct order
self.assertIsNone(func_call_matcher.match())
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assertFalse(name_label.endswith('snapshot'))
self.assertTrue(self.fake_upload_called)
def create_vm_record(self, conn, os_type, name):
instances = conn.list_instances()
self.assertEqual(instances, [name])
# Get Nova record for VM
vm_info = conn.get_info({'name': name})
# Get XenAPI record for VM
vms = [rec for ref, rec
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
self.vm_info = vm_info
self.vm = vm
def check_vm_record(self, conn, instance_type_id, check_injection):
flavor = db.flavor_get(conn, instance_type_id)
mem_kib = long(flavor['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = flavor['vcpus']
vcpu_weight = flavor['vcpu_weight']
self.assertEqual(self.vm_info['max_mem'], mem_kib)
self.assertEqual(self.vm_info['mem'], mem_kib)
self.assertEqual(self.vm['memory_static_max'], mem_bytes)
self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes)
self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
if vcpu_weight is None:
self.assertEqual(self.vm['VCPUs_params'], {})
else:
self.assertEqual(self.vm['VCPUs_params'],
{'weight': str(vcpu_weight), 'cap': '0'})
# Check that the VM is running according to Nova
self.assertEqual(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEqual(self.vm['power_state'], 'Running')
if check_injection:
xenstore_data = self.vm['xenstore_data']
self.assertNotIn('vm-data/hostname', xenstore_data)
key = 'vm-data/networking/DEADBEEF0001'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertEqual(tcpip_data,
{'broadcast': '192.168.1.255',
'dns': ['192.168.1.4', '192.168.1.3'],
'gateway': '192.168.1.1',
'gateway_v6': '2001:db8:0:1::1',
'ip6s': [{'enabled': '1',
'ip': '2001:db8:0:1::1',
'netmask': 64,
'gateway': '2001:db8:0:1::1'}],
'ips': [{'enabled': '1',
'ip': '192.168.1.100',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'},
{'enabled': '1',
'ip': '192.168.1.101',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'}],
'label': 'test1',
'mac': 'DE:AD:BE:EF:00:01'})
def check_vm_params_for_windows(self):
self.assertEqual(self.vm['platform']['nx'], 'true')
self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEqual(self.vm['PV_args'], '')
self.assertEqual(self.vm['PV_bootloader'], '')
self.assertEqual(self.vm['PV_kernel'], '')
self.assertEqual(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
self.assertEqual(self.vm['platform']['nx'], 'false')
self.assertEqual(self.vm['PV_args'], '')
self.assertEqual(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
self.assertEqual(self.vm['PV_kernel'], '')
self.assertEqual(self.vm['PV_ramdisk'], '')
self.assertEqual(self.vm['HVM_boot_params'], {})
self.assertEqual(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
self.assertEqual(self.vm['platform']['nx'], 'false')
self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1')
self.assertNotEqual(self.vm['PV_kernel'], '')
self.assertNotEqual(self.vm['PV_ramdisk'], '')
# check that these are not set
self.assertEqual(self.vm['HVM_boot_params'], {})
self.assertEqual(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
session = get_session()
return session.call_xenapi('VDI.get_all')
def _list_vms(self):
session = get_session()
return session.call_xenapi('VM.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if vdi_ref not in start_list:
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
# If the cache is turned on then the base disk will be
# there even after the cleanup
if 'other_config' in vdi_rec:
if 'image-id' not in vdi_rec['other_config']:
self.fail('Found unexpected VDI:%s' % vdi_ref)
else:
self.fail('Found unexpected VDI:%s' % vdi_ref)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
hostname="test", architecture="x86-64", instance_id=1,
injected_files=None, check_injection=False,
create_record=True, empty_dns=False,
block_device_info=None,
key_data=None):
if injected_files is None:
injected_files = []
# Fake out inject_instance_metadata
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
if create_record:
instance = objects.Instance(context=self.context)
instance.project_id = self.project_id
instance.user_id = self.user_id
instance.image_ref = image_ref
instance.kernel_id = kernel_id
instance.ramdisk_id = ramdisk_id
instance.root_gb = 20
instance.ephemeral_gb = 0
instance.instance_type_id = instance_type_id
instance.os_type = os_type
instance.hostname = hostname
instance.key_data = key_data
instance.architecture = architecture
instance.system_metadata = get_create_system_metadata(
self.context, instance_type_id)
instance.create()
else:
instance = objects.Instance.get_by_id(self.context, instance_id)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
if empty_dns:
# NOTE(tr3buchet): this is a terrible way to do this...
network_info[0]['network']['subnets'][0]['dns'] = []
image_meta = {}
if image_ref:
image_meta = IMAGE_FIXTURES[image_ref]["image_meta"]
self.conn.spawn(self.context, instance, image_meta, injected_files,
'herp', network_info, block_device_info)
self.create_vm_record(self.conn, os_type, instance['name'])
self.check_vm_record(self.conn, instance_type_id, check_injection)
self.assertEqual(instance['os_type'], os_type)
self.assertEqual(instance['architecture'], architecture)
def test_spawn_ipxe_iso_success(self):
self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path')
self.flags(ipxe_network_name='test1',
ipxe_boot_menu_url='http://boot.example.com',
ipxe_mkisofs_cmd='/root/mkisofs',
group='xenserver')
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.conn._session.call_plugin_serialized(
'ipxe', 'inject', '/sr/path', mox.IgnoreArg(),
'http://boot.example.com', '192.168.1.100', '255.255.255.0',
'192.168.1.1', '192.168.1.3', '/root/mkisofs')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_no_network_name(self):
self.flags(ipxe_network_name=None,
ipxe_boot_menu_url='http://boot.example.com',
group='xenserver')
# call_plugin_serialized shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_no_boot_menu_url(self):
self.flags(ipxe_network_name='test1',
ipxe_boot_menu_url=None,
group='xenserver')
# call_plugin_serialized shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_unknown_network_name(self):
self.flags(ipxe_network_name='test2',
ipxe_boot_menu_url='http://boot.example.com',
group='xenserver')
# call_plugin_serialized shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_empty_dns(self):
# Test spawning with an empty dns list.
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
self.check_vm_params_for_linux()
def test_spawn_not_enough_memory(self):
self.assertRaises(exception.InsufficientFreeMemory,
self._test_spawn,
'1', 2, 3, "4") # m1.xlarge
def test_spawn_fail_cleanup_1(self):
"""Simulates an error while downloading an image.
Verifies that the VM and VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, '1', 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_fail_cleanup_2(self):
"""Simulates an error while creating VM record.
Verifies that the VM and VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, '1', 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_fail_cleanup_3(self):
"""Simulates an error while attaching disks.
Verifies that the VM and VDIs created are properly cleaned up.
"""
stubs.stubout_attach_disks(self.stubs)
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, '1', 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_raw_glance(self):
self._test_spawn(IMAGE_RAW, None, None, os_type=None)
self.check_vm_params_for_windows()
def test_spawn_vhd_glance_linux(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_windows(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="windows", architecture="i386",
instance_type_id=5)
self.check_vm_params_for_windows()
def test_spawn_iso_glance(self):
self._test_spawn(IMAGE_ISO, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
def fake_fetch_disk_image(context, session, instance, name_label,
image_id, image_type):
sr_ref = vm_utils.safe_find_sr(session)
image_type_str = vm_utils.ImageType.to_string(image_type)
vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
name_label, image_type_str, "20")
vdi_role = vm_utils.ImageType.get_role(image_type)
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
return {vdi_role: dict(uuid=vdi_uuid, file=None)}
self.stubs.Set(vm_utils, '_fetch_disk_image',
fake_fetch_disk_image)
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_boot_from_volume_no_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(None, None, None,
block_device_info=dev_info)
def test_spawn_boot_from_volume_no_glance_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(IMAGE_FROM_VOLUME, None, None,
block_device_info=dev_info)
def test_spawn_boot_from_volume_with_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(IMAGE_VHD, None, None,
block_device_info=dev_info)
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _tee_handler(cmd, **kwargs):
actual = kwargs.get('process_input', None)
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address 192.168.1.100
netmask 255.255.255.0
broadcast 192.168.1.255
gateway 192.168.1.1
dns-nameservers 192.168.1.3 192.168.1.4
iface eth0 inet6 static
address 2001:db8:0:1::1
netmask 64
gateway 2001:db8:0:1::1
"""
self.assertEqual(expected, actual)
self._tee_executed = True
return '', ''
def _readlink_handler(cmd_parts, **kwargs):
return os.path.realpath(cmd_parts[2]), ''
fake_processutils.fake_execute_set_repliers([
# Capture the tee .../etc/network/interfaces command
(r'tee.*interfaces', _tee_handler),
(r'readlink -nm.*', _readlink_handler),
])
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
check_injection=True)
self.assertTrue(self._tee_executed)
def test_spawn_netinject_xenstore(self):
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
# When mounting, create real files under the mountpoint to simulate
# files in the mounted filesystem
# mount point will be the last item of the command list
self._tmpdir = cmd[len(cmd) - 1]
LOG.debug('Creating files in %s to simulate guest agent',
self._tmpdir)
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'), 'w').close()
return '', ''
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
# Umount would normally make files in the mounted filesystem
# disappear, so do that here
LOG.debug('Removing simulated guest agent files in %s',
self._tmpdir)
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
os.rmdir(os.path.join(self._tmpdir, 'usr'))
return '', ''
def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
self._tee_executed = True
return '', ''
fake_processutils.fake_execute_set_repliers([
(r'mount', _mount_handler),
(r'umount', _umount_handler),
(r'tee.*interfaces', _tee_handler)])
self._test_spawn('1', 2, 3, check_injection=True)
# tee must not run in this case, where an injection-capable
# guest agent is detected
self.assertFalse(self._tee_executed)
def test_spawn_injects_auto_disk_config_to_xenstore(self):
instance = self._create_instance(spawn=False)
self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
self.mox.ReplayAll()
self.conn.spawn(self.context, instance,
IMAGE_FIXTURES['1']["image_meta"], [], 'herp', '')
def test_spawn_vlanmanager(self):
self.flags(network_manager='nova.network.manager.VlanManager',
vlan_interface='fake0')
def dummy(*args, **kwargs):
pass
self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
# Reset network table
xenapi_fake.reset_table('network')
# Instance id = 2 will use vlan network (see db/fakes.py)
ctxt = self.context.elevated()
self.network.conductor_api = conductor_api.LocalAPI()
self._create_instance(2, False)
networks = self.network.db.network_get_all(ctxt)
with mock.patch('nova.objects.network.Network._from_db_object'):
for network in networks:
self.network.set_network_host(ctxt, network)
self.network.allocate_for_instance(ctxt,
instance_id=2,
instance_uuid='00000000-0000-0000-0000-000000000002',
host=CONF.host,
vpn=None,
rxtx_factor=3,
project_id=self.project_id,
macs=None)
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
instance_id=2,
create_record=False)
# TODO(salvatore-orlando): a complete test here would require
# a check for making sure the bridge for the VM's VIF is
# consistent with bridge specified in nova db
def test_spawn_with_network_qos(self):
self._create_instance()
for vif_ref in xenapi_fake.get_all('VIF'):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEqual(vif_rec['qos_algorithm_params']['kbps'],
str(3 * 10 * 1024))
def test_spawn_ssh_key_injection(self):
# Test spawning with key_data on an instance. Should use
# agent file injection.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
def fake_encrypt_text(sshkey, new_pass):
self.assertEqual("ssh-rsa fake_keydata", sshkey)
return "fake"
self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
expected_data = ('\n# The following ssh key was injected by '
'Nova\nssh-rsa fake_keydata\n')
injected_files = [('/root/.ssh/authorized_keys', expected_data)]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
key_data='ssh-rsa fake_keydata')
self.assertEqual(actual_injected_files, injected_files)
def test_spawn_ssh_key_injection_non_rsa(self):
# Test spawning with key_data on an instance. Should use
# agent file injection.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
def fake_encrypt_text(sshkey, new_pass):
raise NotImplementedError("Should not be called")
self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
expected_data = ('\n# The following ssh key was injected by '
'Nova\nssh-dsa fake_keydata\n')
injected_files = [('/root/.ssh/authorized_keys', expected_data)]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
key_data='ssh-dsa fake_keydata')
self.assertEqual(actual_injected_files, injected_files)
def test_spawn_injected_files(self):
# Test spawning with injected_files.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
injected_files = [('/tmp/foo', 'foobar')]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
injected_files=injected_files)
self.check_vm_params_for_linux()
self.assertEqual(actual_injected_files, injected_files)
@mock.patch('nova.db.agent_build_get_by_triple')
def test_spawn_agent_upgrade(self, mock_get):
self.flags(use_agent_default=True,
group='xenserver')
mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
"hypervisor": "xen", "os": "windows",
"url": "url", "md5hash": "asdf",
'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': False,
'id': 1}
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
@mock.patch('nova.db.agent_build_get_by_triple')
def test_spawn_agent_upgrade_fails_silently(self, mock_get):
mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
"hypervisor": "xen", "os": "windows",
"url": "url", "md5hash": "asdf",
'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': False,
'id': 1}
self._test_spawn_fails_silently_with(exception.AgentError,
method="_plugin_agent_agentupdate", failure="fake_error")
def test_spawn_with_resetnetwork_alternative_returncode(self):
self.flags(use_agent_default=True,
group='xenserver')
def fake_resetnetwork(self, method, args):
fake_resetnetwork.called = True
# NOTE(johngarbutt): as returned by FreeBSD and Gentoo
return jsonutils.dumps({'returncode': '500',
'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_resetnetwork', fake_resetnetwork)
fake_resetnetwork.called = False
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.assertTrue(fake_resetnetwork.called)
def _test_spawn_fails_silently_with(self, expected_exception_cls,
method="_plugin_agent_version",
failure=None, value=None):
self.flags(use_agent_default=True,
agent_version_timeout=0,
group='xenserver')
def fake_agent_call(self, method, args):
if failure:
raise xenapi_fake.Failure([failure])
else:
return value
self.stubs.Set(stubs.FakeSessionForVMTests,
method, fake_agent_call)
called = {}
def fake_add_instance_fault(*args, **kwargs):
called["fake_add_instance_fault"] = args[2]
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
fake_add_instance_fault)
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
actual_exception = called["fake_add_instance_fault"]
self.assertIsInstance(actual_exception, expected_exception_cls)
def test_spawn_fails_silently_with_agent_timeout(self):
self._test_spawn_fails_silently_with(exception.AgentTimeout,
failure="TIMEOUT:fake")
def test_spawn_fails_silently_with_agent_not_implemented(self):
self._test_spawn_fails_silently_with(exception.AgentNotImplemented,
failure="NOT IMPLEMENTED:fake")
def test_spawn_fails_silently_with_agent_error(self):
self._test_spawn_fails_silently_with(exception.AgentError,
failure="fake_error")
def test_spawn_fails_silently_with_agent_bad_return(self):
error = jsonutils.dumps({'returncode': -1, 'message': 'fake'})
self._test_spawn_fails_silently_with(exception.AgentError,
value=error)
def test_rescue(self):
instance = self._create_instance(spawn=False)
xenapi_fake.create_vm(instance['name'], 'Running')
session = get_session()
vm_ref = vm_utils.lookup(session, instance['name'])
swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
root_vdi_ref = xenapi_fake.create_vdi('root', None)
eph1_vdi_ref = xenapi_fake.create_vdi('eph', None)
eph2_vdi_ref = xenapi_fake.create_vdi('eph', None)
vol_vdi_ref = xenapi_fake.create_vdi('volume', None)
xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2)
xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4)
xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5)
xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6,
other_config={'osvol': True})
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
conn.rescue(self.context, instance, [], image_meta, '')
vm = xenapi_fake.get_record('VM', vm_ref)
rescue_name = "%s-rescue" % vm["name_label"]
rescue_ref = vm_utils.lookup(session, rescue_name)
rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
vdi_refs = {}
for vbd_ref in rescue_vm['VBDs']:
vbd = xenapi_fake.get_record('VBD', vbd_ref)
vdi_refs[vbd['VDI']] = vbd['userdevice']
self.assertEqual('1', vdi_refs[root_vdi_ref])
self.assertEqual('2', vdi_refs[swap_vdi_ref])
self.assertEqual('4', vdi_refs[eph1_vdi_ref])
self.assertEqual('5', vdi_refs[eph2_vdi_ref])
self.assertNotIn(vol_vdi_ref, vdi_refs)
def test_rescue_preserve_disk_on_failure(self):
# test that the original disk is preserved if rescue setup fails
# bug #1227898
instance = self._create_instance()
session = get_session()
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
vm_ref = vm_utils.lookup(session, instance['name'])
vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
# raise an error in the spawn setup process and trigger the
# undo manager logic:
def fake_start(*args, **kwargs):
raise test.TestingException('Start Error')
self.stubs.Set(self.conn._vmops, '_start', fake_start)
self.assertRaises(test.TestingException, self.conn.rescue,
self.context, instance, [], image_meta, '')
# confirm original disk still exists:
vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
self.assertEqual(vdi_ref, vdi_ref2)
self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid'])
def test_unrescue(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running')
conn.unrescue(instance, None)
def test_unrescue_not_in_rescue(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
instance, None)
def test_finish_revert_migration(self):
instance = self._create_instance()
class VMOpsMock():
def __init__(self):
self.finish_revert_migration_called = False
def finish_revert_migration(self, context, instance, block_info,
power_on):
self.finish_revert_migration_called = True
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(self.context, instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def test_reboot_hard(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "HARD")
def test_poll_rebooting_instances(self):
self.mox.StubOutWithMock(compute_api.API, 'reboot')
compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
instance = self._create_instance()
instances = [instance]
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.poll_rebooting_instances(60, instances)
def test_reboot_soft(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "SOFT")
def test_reboot_halted(self):
session = get_session()
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Halted')
conn.reboot(self.context, instance, None, "SOFT")
vm_ref = vm_utils.lookup(session, instance['name'])
vm = xenapi_fake.get_record('VM', vm_ref)
self.assertEqual(vm['power_state'], 'Running')
def test_reboot_unknown_state(self):
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Unknown')
self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
instance, None, "SOFT")
def test_reboot_rescued(self):
instance = self._create_instance()
instance['vm_state'] = vm_states.RESCUED
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
real_result = vm_utils.lookup(conn._session, instance['name'])
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(conn._session, instance['name'],
True).AndReturn(real_result)
self.mox.ReplayAll()
conn.reboot(self.context, instance, None, "SOFT")
def test_get_console_output_succeeds(self):
def fake_get_console_output(instance):
self.assertEqual("instance", instance)
return "console_log"
self.stubs.Set(self.conn._vmops, 'get_console_output',
fake_get_console_output)
self.assertEqual(self.conn.get_console_output('context', "instance"),
"console_log")
def _test_maintenance_mode(self, find_host, find_aggregate):
real_call_xenapi = self.conn._session.call_xenapi
instance = self._create_instance(spawn=True)
api_calls = {}
# Record all the xenapi calls, and return a fake list of hosts
# for the host.get_all call
def fake_call_xenapi(method, *args):
api_calls[method] = args
if method == 'host.get_all':
return ['foo', 'bar', 'baz']
return real_call_xenapi(method, *args)
self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
def fake_aggregate_get(context, host, key):
if find_aggregate:
return [test_aggregate.fake_aggregate]
else:
return []
self.stubs.Set(db, 'aggregate_get_by_host',
fake_aggregate_get)
def fake_host_find(context, session, src, dst):
if find_host:
return 'bar'
else:
raise exception.NoValidHost("I saw this one coming...")
self.stubs.Set(host, '_host_find', fake_host_find)
result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
self.assertEqual(result, 'on_maintenance')
# We expect the VM.pool_migrate call to have been called to
# migrate our instance to the 'bar' host
vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
host_ref = "foo"
expected = (vm_ref, host_ref, {"live": "true"})
self.assertEqual(api_calls.get('VM.pool_migrate'), expected)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], task_states.MIGRATING)
def test_maintenance_mode(self):
self._test_maintenance_mode(True, True)
def test_maintenance_mode_no_host(self):
self.assertRaises(exception.NoValidHost,
self._test_maintenance_mode, False, True)
def test_maintenance_mode_no_aggregate(self):
self.assertRaises(exception.NotFound,
self._test_maintenance_mode, True, False)
def test_uuid_find(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
fake_inst = fake_instance.fake_db_instance(id=123)
fake_inst2 = fake_instance.fake_db_instance(id=456)
db.instance_get_all_by_host(self.context, fake_inst['host'],
columns_to_join=None,
use_slave=False
).AndReturn([fake_inst, fake_inst2])
self.mox.ReplayAll()
expected_name = CONF.instance_name_template % fake_inst['id']
inst_uuid = host._uuid_find(self.context, fake_inst['host'],
expected_name)
self.assertEqual(inst_uuid, fake_inst['uuid'])
def test_session_virtapi(self):
was = {'called': False}
def fake_aggregate_get_by_host(self, *args, **kwargs):
was['called'] = True
raise test.TestingException()
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.stubs.Set(self.conn._session, "is_slave", True)
self.assertRaises(test.TestingException,
self.conn._session._get_host_uuid)
self.assertTrue(was['called'])
def test_per_instance_usage_running(self):
instance = self._create_instance(spawn=True)
flavor = flavors.get_flavor(3)
expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'],
'uuid': instance['uuid']}}
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
# Paused instances still consume resources:
self.conn.pause(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
def test_per_instance_usage_suspended(self):
# Suspended instances do not consume memory:
instance = self._create_instance(spawn=True)
self.conn.suspend(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
def test_per_instance_usage_halted(self):
instance = self._create_instance(spawn=True)
self.conn.power_off(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
def _create_instance(self, instance_id=1, spawn=True, obj=False, **attrs):
"""Creates and spawns a test instance."""
instance_values = {
'id': instance_id,
'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
'display_name': 'host-%d' % instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'vm_mode': 'hvm',
'architecture': 'x86-64'}
instance_values.update(attrs)
instance = create_instance_with_system_metadata(self.context,
instance_values)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
if spawn:
self.conn.spawn(self.context, instance, image_meta, [], 'herp',
network_info)
if obj:
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), instance,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
return instance
def test_destroy_clean_up_kernel_and_ramdisk(self):
def fake_lookup_kernel_ramdisk(session, vm_ref):
return "kernel", "ramdisk"
self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
fake_lookup_kernel_ramdisk)
def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
fake_destroy_kernel_ramdisk.called = True
self.assertEqual("kernel", kernel)
self.assertEqual("ramdisk", ramdisk)
fake_destroy_kernel_ramdisk.called = False
self.stubs.Set(vm_utils, "destroy_kernel_ramdisk",
fake_destroy_kernel_ramdisk)
instance = self._create_instance(spawn=True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
self.conn.destroy(self.context, instance, network_info)
vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
self.assertIsNone(vm_ref)
self.assertTrue(fake_destroy_kernel_ramdisk.called)
class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = agent.SimpleDH()
self.bob = agent.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
bob_pub = self.bob.get_public()
alice_shared = self.alice.compute_shared(bob_pub)
bob_shared = self.bob.compute_shared(alice_pub)
self.assertEqual(alice_shared, bob_shared)
def _test_encryption(self, message):
enc = self.alice.encrypt(message)
self.assertFalse(enc.endswith('\n'))
dec = self.bob.decrypt(enc)
self.assertEqual(dec, message)
def test_encrypt_simple_message(self):
self._test_encryption('This is a simple message.')
def test_encrypt_message_with_newlines_at_end(self):
self._test_encryption('This message has a newline at the end.\n')
def test_encrypt_many_newlines_at_end(self):
self._test_encryption('Message with lotsa newlines.\n\n\n')
def test_encrypt_newlines_inside_message(self):
self._test_encryption('Message\nwith\ninterior\nnewlines.')
def test_encrypt_with_leading_newlines(self):
self._test_encryption('\n\nMessage with leading newlines.')
def test_encrypt_really_long_message(self):
self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
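# Illustrative sketch (not part of the original suite) of the SimpleDH
# exchange pattern exercised above: each side publishes its public value,
# both derive the same shared secret, and ciphertext produced by one side is
# readable by the other.
#
#     alice, bob = agent.SimpleDH(), agent.SimpleDH()
#     alice.compute_shared(bob.get_public())
#     bob.compute_shared(alice.get_public())
#     assert bob.decrypt(alice.encrypt('secret')) == 'secret'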
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIMigrateInstance(stubs.XenAPITestBase):
"""Unit test for verifying migration-related actions."""
def setUp(self):
super(XenAPIMigrateInstance, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', 'fake_br1')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
migration_values = {
'source_compute': 'nova-compute',
'dest_compute': 'nova-compute',
'dest_host': '10.127.5.114',
'status': 'post-migrating',
'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
'old_instance_type_id': 5,
'new_instance_type_id': 1
}
self.migration = db.migration_create(
context.get_admin_context(), migration_values)
fake_processutils.stub_out_processutils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = {"root_gb": 80, 'ephemeral_gb': 0}
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_passes_exceptions(self):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = {"root_gb": 80, 'ephemeral_gb': 0}
def fake_raise(*args, **kwargs):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
instance = db.instance_create(self.context, self.instance_values)
flavor = {"root_gb": 0, 'ephemeral_gb': 0}
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ResizeError,
conn.migrate_disk_and_power_off,
self.context, instance,
'fake_dest', flavor, None)
def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
flavor = {"root_gb": 0, 'ephemeral_gb': 0}
values = copy.copy(self.instance_values)
values["root_gb"] = 0
values["ephemeral_gb"] = 0
instance = db.instance_create(self.context, values)
xenapi_fake.create_vm(instance['name'], 'Running')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', flavor, None)
def _test_revert_migrate(self, power_on):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
self.called = False
self.fake_vm_start_called = False
self.fake_finish_revert_migration_called = False
context = 'fake_context'
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
def fake_finish_revert_migration(*args, **kwargs):
self.fake_finish_revert_migration_called = True
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
fake_finish_revert_migration)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(4, 0, 0),
product_brand='XenServer')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
base = xenapi_fake.create_vdi('hurr', 'fake')
base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
cow = xenapi_fake.create_vdi('durr', 'fake')
cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
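        # finish_migration is handed the VHD chain produced by the resize:
        # 'base_copy' names the parent image and 'cow' the copy-on-write child
        # holding the instance's changes.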
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy=base_uuid, cow=cow_uuid),
network_info, image_meta, resize_instance=True,
block_device_info=None, power_on=power_on)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, power_on)
conn.finish_revert_migration(context, instance, network_info)
self.assertEqual(self.fake_finish_revert_migration_called, True)
def test_revert_migrate_power_on(self):
self._test_revert_migrate(True)
def test_revert_migrate_power_off(self):
self._test_revert_migrate(False)
def _test_finish_migrate(self, power_on):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
self.called = False
self.fake_vm_start_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(4, 0, 0),
product_brand='XenServer')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True,
block_device_info=None, power_on=power_on)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, power_on)
def test_finish_migrate_power_on(self):
self._test_finish_migrate(True)
def test_finish_migrate_power_off(self):
self._test_finish_migrate(False)
def test_finish_migrate_no_local_storage(self):
values = copy.copy(self.instance_values)
values["root_gb"] = 0
values["ephemeral_gb"] = 0
instance = create_instance_with_system_metadata(self.context, values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
# Resize instance would be determined by the compute call
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
@stub_vm_utils_with_vdi_attached_here
def test_migrate_too_many_partitions_no_resize_down(self):
instance_values = self.instance_values
instance = db.instance_create(self.context, instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = db.flavor_get_by_name(self.context, 'm1.small')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_partitions(partition):
return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")]
self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
self.assertRaises(exception.InstanceFaultRollback,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
@stub_vm_utils_with_vdi_attached_here
def test_migrate_bad_fs_type_no_resize_down(self):
instance_values = self.instance_values
instance = db.instance_create(self.context, instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = db.flavor_get_by_name(self.context, 'm1.small')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_partitions(partition):
return [(1, 2, 3, "ext2", "", "boot")]
self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
self.assertRaises(exception.InstanceFaultRollback,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_rollback_when_resize_down_fs_fails(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
self.mox.StubOutWithMock(vm_utils, 'resize_disk')
self.mox.StubOutWithMock(vm_utils, 'migrate_vhd')
self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')
instance = objects.Instance(context=self.context,
auto_disk_config=True, uuid='uuid')
instance.obj_reset_changes()
vm_ref = "vm_ref"
dest = "dest"
flavor = "type"
sr_path = "sr_path"
vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
vmops._apply_orig_vm_name_label(instance, vm_ref)
old_vdi_ref = "old_ref"
vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
(old_vdi_ref, None))
new_vdi_ref = "new_ref"
new_vdi_uuid = "new_uuid"
vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
flavor).AndReturn((new_vdi_ref, new_vdi_uuid))
vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest,
sr_path, 0).AndRaise(
exception.ResizeError(reason="asdf"))
vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
vmops._restore_orig_vm_and_cleanup_orphan(instance)
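        # Everything above is a recorded mox expectation; ReplayAll() switches
        # to replay mode, so the resize-down must fail at migrate_vhd and then
        # run the destroy_vdi / restore-orphan cleanup path in this order.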
self.mox.ReplayAll()
with mock.patch.object(instance, 'save') as mock_save:
self.assertRaises(exception.InstanceFaultRollback,
vmops._migrate_disk_resizing_down, self.context,
instance, dest, flavor, vm_ref, sr_path)
self.assertEqual(3, mock_save.call_count)
self.assertEqual(60.0, instance.progress)
def test_resize_ensure_vm_is_shutdown_cleanly(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_forced(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_fails(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ResizeError,
vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
class XenAPIImageTypeTestCase(test.NoDBTestCase):
"""Test ImageType class."""
def test_to_string(self):
# Can convert from type id to type string.
self.assertEqual(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
def _assert_role(self, expected_role, image_type_id):
self.assertEqual(
expected_role,
vm_utils.ImageType.get_role(image_type_id))
def test_get_image_role_kernel(self):
self._assert_role('kernel', vm_utils.ImageType.KERNEL)
def test_get_image_role_ramdisk(self):
self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)
def test_get_image_role_disk(self):
self._assert_role('root', vm_utils.ImageType.DISK)
def test_get_image_role_disk_raw(self):
self._assert_role('root', vm_utils.ImageType.DISK_RAW)
def test_get_image_role_disk_vhd(self):
self._assert_role('root', vm_utils.ImageType.DISK_VHD)
class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase):
"""Unit tests for code that detects the ImageType."""
def assert_disk_type(self, image_meta, expected_disk_type):
actual = vm_utils.determine_disk_image_type(image_meta)
self.assertEqual(expected_disk_type, actual)
def test_machine(self):
image_meta = {'id': 'a', 'disk_format': 'ami'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
def test_raw(self):
image_meta = {'id': 'a', 'disk_format': 'raw'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
def test_vhd(self):
image_meta = {'id': 'a', 'disk_format': 'vhd'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
def test_none(self):
image_meta = None
self.assert_disk_type(image_meta, None)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIHostTestCase(stubs.XenAPITestBase):
"""Tests HostState, which holds metrics from XenServer that get
reported back to the Schedulers.
"""
def setUp(self):
super(XenAPIHostTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.flags(use_local=True, group='conductor')
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.instance = fake_instance.fake_db_instance(name='foo')
def test_host_state(self):
stats = self.conn.get_host_stats()
# Values from fake.create_local_srs (ext SR)
self.assertEqual(stats['disk_total'], 40000)
self.assertEqual(stats['disk_used'], 20000)
# Values from fake._plugin_xenhost_host_data
self.assertEqual(stats['host_memory_total'], 10)
self.assertEqual(stats['host_memory_overhead'], 20)
self.assertEqual(stats['host_memory_free'], 30)
self.assertEqual(stats['host_memory_free_computed'], 40)
self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost')
self.assertThat({'cpu_count': 50},
matchers.DictMatches(stats['host_cpu_info']))
# No VMs running
self.assertEqual(stats['vcpus_used'], 0)
def test_host_state_vcpus_used(self):
stats = self.conn.get_host_stats(True)
self.assertEqual(stats['vcpus_used'], 0)
xenapi_fake.create_vm(self.instance['name'], 'Running')
stats = self.conn.get_host_stats(True)
self.assertEqual(stats['vcpus_used'], 4)
def test_pci_passthrough_devices_whitelist(self):
# NOTE(guillaume-thouvenin): This pci whitelist will be used to
# match with _plugin_xenhost_get_pci_device_details method in fake.py.
white_list = '{"vendor_id":"10de", "product_id":"11bf"}'
self.flags(pci_passthrough_whitelist=[white_list])
stats = self.conn.get_host_stats()
self.assertEqual(len(stats['pci_passthrough_devices']), 1)
def test_pci_passthrough_devices_no_whitelist(self):
stats = self.conn.get_host_stats()
self.assertEqual(len(stats['pci_passthrough_devices']), 0)
def test_host_state_missing_sr(self):
def fake_safe_find_sr(session):
raise exception.StorageRepositoryNotFound('not there')
self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
self.assertRaises(exception.StorageRepositoryNotFound,
self.conn.get_host_stats)
def _test_host_action(self, method, action, expected=None):
result = method('host', action)
if not expected:
expected = action
self.assertEqual(result, expected)
def test_host_reboot(self):
self._test_host_action(self.conn.host_power_action, 'reboot')
def test_host_shutdown(self):
self._test_host_action(self.conn.host_power_action, 'shutdown')
def test_host_startup(self):
self.assertRaises(NotImplementedError,
self.conn.host_power_action, 'host', 'startup')
def test_host_maintenance_on(self):
self._test_host_action(self.conn.host_maintenance_mode,
True, 'on_maintenance')
def test_host_maintenance_off(self):
self._test_host_action(self.conn.host_maintenance_mode,
False, 'off_maintenance')
def test_set_enable_host_enable(self):
_create_service_entries(self.context, values={'nova': ['fake-mini']})
self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
service = db.service_get_by_args(self.context, 'fake-mini',
'nova-compute')
self.assertEqual(service.disabled, False)
def test_set_enable_host_disable(self):
_create_service_entries(self.context, values={'nova': ['fake-mini']})
self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
service = db.service_get_by_args(self.context, 'fake-mini',
'nova-compute')
self.assertEqual(service.disabled, True)
def test_get_host_uptime(self):
result = self.conn.get_host_uptime('host')
self.assertEqual(result, 'fake uptime')
def test_supported_instances_is_included_in_host_state(self):
stats = self.conn.get_host_stats()
self.assertIn('supported_instances', stats)
def test_supported_instances_is_calculated_by_to_supported_instances(self):
def to_supported_instances(somedata):
self.assertIsNone(somedata)
return "SOMERETURNVALUE"
self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
stats = self.conn.get_host_stats()
self.assertEqual("SOMERETURNVALUE", stats['supported_instances'])
def test_update_stats_caches_hostname(self):
self.mox.StubOutWithMock(host, 'call_xenhost')
self.mox.StubOutWithMock(vm_utils, 'scan_default_sr')
self.mox.StubOutWithMock(vm_utils, 'list_vms')
self.mox.StubOutWithMock(self.conn._session, 'call_xenapi')
data = {'disk_total': 0,
'disk_used': 0,
'disk_available': 0,
'supported_instances': 0,
'host_capabilities': [],
'host_hostname': 'foo',
'vcpus_used': 0,
}
sr_rec = {
'physical_size': 0,
'physical_utilisation': 0,
'virtual_allocation': 0,
}
for i in range(3):
host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data)
vm_utils.scan_default_sr(self.conn._session).AndReturn("ref")
vm_utils.list_vms(self.conn._session).AndReturn([])
self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn(
sr_rec)
if i == 2:
# On the third call (the second below) change the hostname
data = dict(data, host_hostname='bar')
self.mox.ReplayAll()
stats = self.conn.get_host_stats(refresh=True)
self.assertEqual('foo', stats['hypervisor_hostname'])
stats = self.conn.get_host_stats(refresh=True)
self.assertEqual('foo', stats['hypervisor_hostname'])
class ToSupportedInstancesTestCase(test.NoDBTestCase):
def test_default_return_value(self):
self.assertEqual([],
host.to_supported_instances(None))
def test_return_value(self):
self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
host.to_supported_instances([u'xen-3.0-x86_64']))
def test_invalid_values_do_not_break(self):
self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))
def test_multiple_values(self):
self.assertEqual(
[
(arch.X86_64, hvtype.XEN, 'xen'),
(arch.I686, hvtype.XEN, 'hvm')
],
host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32'])
)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
def setUp(self):
super(XenAPIAutoDiskConfigTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True,
osvol=False):
pass
self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertIsPartitionCalled(self, called):
marker = {"partition_called": False}
def fake_resize_part_and_fs(dev, start, old_sectors, new_sectors,
flags):
marker["partition_called"] = True
self.stubs.Set(vm_utils, "_resize_part_and_fs",
fake_resize_part_and_fs)
context.RequestContext(self.user_id, self.project_id)
session = get_session()
disk_image_type = vm_utils.ImageType.DISK_VHD
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
vdis, disk_image_type, "fake_nw_inf")
self.assertEqual(marker["partition_called"], called)
def test_instance_not_auto_disk_config(self):
"""Should not partition unless instance is marked as
auto_disk_config.
"""
self.instance_values['auto_disk_config'] = False
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_fails_safe_two_partitions(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4', "", ""), (2, 100, 200, 'ext4', "", "")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(2, 100, 200, 'ext4', "", "")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 100, 200, 'asdf', "", "")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_passes_fail_safes(self):
"""Should partition if instance is marked as auto_disk_config=True and
virt-layer specific fail-safe checks pass.
"""
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4', "", "boot")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(True)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIGenerateLocal(stubs.XenAPITestBase):
"""Test generating of local disks, like swap and ephemeral."""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True,
osvol=False, empty=False, unpluggable=True):
return session.call_xenapi('VBD.create', {'VM': vm_ref,
'VDI': vdi_ref})
self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertCalled(self, instance,
disk_image_type=vm_utils.ImageType.DISK_VHD):
context.RequestContext(self.user_id, self.project_id)
session = get_session()
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdi_key = 'root'
if disk_image_type == vm_utils.ImageType.DISK_ISO:
vdi_key = 'iso'
vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.called = False
self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
vdis, disk_image_type, "fake_nw_inf")
self.assertTrue(self.called)
def test_generate_swap(self):
# Test swap disk generation.
instance_values = dict(self.instance_values, instance_type_id=5)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_swap(*args, **kwargs):
self.called = True
self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
self.assertCalled(instance)
def test_generate_ephemeral(self):
# Test ephemeral disk generation.
instance_values = dict(self.instance_values, instance_type_id=4)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_ephemeral(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
self.assertCalled(instance)
def test_generate_iso_blank_root_disk(self):
instance_values = dict(self.instance_values, instance_type_id=4)
instance_values.pop('kernel_id')
instance_values.pop('ramdisk_id')
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_ephemeral(*args):
pass
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
def fake_generate_iso(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
fake_generate_iso)
self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
FAKE_VMS = {'test1:ref': dict(name_label='test1',
other_config=dict(nova_uuid='hash'),
domid='12',
_vifmap={'0': "a:b:c:d...",
'1': "e:f:12:q..."}),
'test2:ref': dict(name_label='test2',
other_config=dict(nova_uuid='hash'),
domid='42',
_vifmap={'0': "a:3:c:d...",
'1': "e:f:42:q..."}),
}
def setUp(self):
super(XenAPIBWCountersTestCase, self).setUp()
self.stubs.Set(vm_utils, 'list_vms',
XenAPIBWCountersTestCase._fake_list_vms)
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def _fake_get_vif_device_map(vm_rec):
return vm_rec['_vifmap']
self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
_fake_get_vif_device_map)
@classmethod
def _fake_list_vms(cls, session):
return cls.FAKE_VMS.iteritems()
@staticmethod
def _fake_fetch_bandwidth_mt(session):
return {}
@staticmethod
def _fake_fetch_bandwidth(session):
return {'42':
{'0': {'bw_in': 21024, 'bw_out': 22048},
'1': {'bw_in': 231337, 'bw_out': 221212121}},
'12':
{'0': {'bw_in': 1024, 'bw_out': 2048},
'1': {'bw_in': 31337, 'bw_out': 21212121}},
}
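    # The outer keys above are domids matching FAKE_VMS ('12' and '42'); the
    # inner keys are VIF device indices, which _get_vif_device_map translates
    # into the MAC addresses asserted in test_get_all_bw_counters below.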
def test_get_all_bw_counters(self):
instances = [dict(name='test1', uuid='1-2-3'),
dict(name='test2', uuid='4-5-6')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
self._fake_fetch_bandwidth)
result = self.conn.get_all_bw_counters(instances)
self.assertEqual(len(result), 4)
self.assertIn(dict(uuid='1-2-3',
mac_address="a:b:c:d...",
bw_in=1024,
bw_out=2048), result)
self.assertIn(dict(uuid='1-2-3',
mac_address="e:f:12:q...",
bw_in=31337,
bw_out=21212121), result)
self.assertIn(dict(uuid='4-5-6',
mac_address="a:3:c:d...",
bw_in=21024,
bw_out=22048), result)
self.assertIn(dict(uuid='4-5-6',
mac_address="e:f:42:q...",
bw_in=231337,
bw_out=221212121), result)
def test_get_all_bw_counters_in_failure_case(self):
"""Test that get_all_bw_conters returns an empty list when
no data returned from Xenserver. c.f. bug #910045.
"""
instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
self._fake_fetch_bandwidth_mt)
result = self.conn.get_all_bw_counters(instances)
self.assertEqual(result, [])
# TODO(salvatore-orlando): this class and
# nova.tests.virt.test_libvirt.IPTablesFirewallDriverTestCase share a lot of
# code. Consider abstracting common code in a base class for firewall driver
# testing.
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
_in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*mangle',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
_in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def setUp(self):
super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.user_id = 'mappin'
self.project_id = 'fake'
stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
test_case=self)
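        # Passing test_case=self gives the fake firewall session a handle on
        # this test case so it can record the iptables rules it applies; they
        # are later inspected as self._out_rules in _validate_security_group().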
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = importutils.import_object(CONF.network_manager)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': self.user_id,
'project_id': self.project_id,
'instance_type_id': 1})
def _create_test_security_group(self):
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
return secgroup
def _validate_security_group(self):
in_rules = filter(lambda l: not l.startswith('#'),
self._in_rules)
for rule in in_rules:
if 'nova' not in rule:
self.assertTrue(rule in self._out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
' -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
' --icmp-type 8 -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
' -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = self._create_test_security_group()
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
network_model = fake_network.fake_get_instance_nw_info(self.stubs, 1)
from nova.compute import utils as compute_utils # noqa
self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
lambda instance: network_model)
self.fw.prepare_instance_filter(instance_ref, network_model)
self.fw.apply_instance_filter(instance_ref, network_model)
self._validate_security_group()
# Extra test for TCP acceptance rules
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
' --dport 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 0)
def test_multinic_iptables(self):
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
_get_instance_nw_info = fake_network.fake_get_instance_nw_info
network_info = _get_instance_nw_info(self.stubs,
networks_count,
ipv4_addr_per_network)
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
# Extra rules are for the DHCP request
rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
networks_count) + 2
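        # With the values above: 1 rule/address * 2 addresses * 5 networks = 10
        # address rules, plus the 2 extra DHCP-related rules = 12 IPv4 rules.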
self.assertEqual(ipv4_network_rules, rules)
self.assertEqual(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
admin_ctxt = context.get_admin_context()
instance_ref = self._create_instance_ref()
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
secgroup = self._create_test_security_group()
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.instance_info[instance_ref['id']] = (instance_ref,
network_info)
self._validate_security_group()
# add a rule to the security group
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'udp',
'from_port': 200,
'to_port': 299,
'cidr': '192.168.99.0/24'})
# validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"Rules were not updated properly."
"The rule for UDP acceptance is missing")
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
# FRAGILE: as in libvirt tests
# peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for testing we find the right SR."""
def test_safe_find_sr_raise_exception(self):
# Ensure StorageRepositoryNotFound is raise when wrong filter.
self.flags(sr_matching_filter='yadayadayada', group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
self.assertRaises(exception.StorageRepositoryNotFound,
vm_utils.safe_find_sr, session)
def test_safe_find_sr_local_storage(self):
# Ensure the default local-storage is found.
self.flags(sr_matching_filter='other-config:i18n-key=local-storage',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
# This test is only guaranteed if there is one host in the pool
self.assertEqual(len(xenapi_fake.get_all('host')), 1)
host_ref = xenapi_fake.get_all('host')[0]
pbd_refs = xenapi_fake.get_all('PBD')
for pbd_ref in pbd_refs:
pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
if pbd_rec['host'] != host_ref:
continue
sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
if sr_rec['other_config']['i18n-key'] == 'local-storage':
local_sr = pbd_rec['SR']
expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_by_other_criteria(self):
# Ensure the SR is found when using a different filter.
self.flags(sr_matching_filter='other-config:my_fake_sr=true',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
type='lvm',
other_config={'my_fake_sr': 'true'},
host_ref=host_ref)
expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_default(self):
# Ensure the default SR is found regardless of other-config.
self.flags(sr_matching_filter='default-sr:true',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
pool_ref = session.call_xenapi('pool.get_all')[0]
expected = vm_utils.safe_find_sr(session)
self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
expected)
def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
'fake_host2'],
'avail_zone2': ['fake_host3'], }):
for avail_zone, hosts in values.iteritems():
for service_host in hosts:
db.service_create(context,
{'host': service_host,
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0})
return values
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAggregateTestCase(stubs.XenAPITestBase):
"""Unit tests for aggregate operations."""
def setUp(self):
super(XenAPIAggregateTestCase, self).setUp()
self.flags(connection_url='http://test_url',
connection_username='test_user',
connection_password='test_pass',
group='xenserver')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host',
compute_driver='xenapi.XenAPIDriver',
default_availability_zone='avail_zone1')
self.flags(use_local=True, group='conductor')
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.compute = importutils.import_object(CONF.compute_manager)
self.api = compute_api.AggregateAPI()
values = {'name': 'test_aggr',
'metadata': {'availability_zone': 'test_zone',
pool_states.POOL_FLAG: 'XenAPI'}}
self.aggr = db.aggregate_create(self.context, values)
self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
'master_compute': 'host',
'availability_zone': 'fake_zone',
pool_states.KEY: pool_states.ACTIVE,
'host': xenapi_fake.get_record('host',
host_ref)['uuid']}
def test_pool_add_to_aggregate_called_by_driver(self):
calls = []
def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
self.assertEqual("SLAVEINFO", slave_info)
calls.append(pool_add_to_aggregate)
self.stubs.Set(self.conn._pool,
"add_to_aggregate",
pool_add_to_aggregate)
self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertIn(pool_add_to_aggregate, calls)
def test_pool_remove_from_aggregate_called_by_driver(self):
calls = []
def pool_remove_from_aggregate(context, aggregate, host,
slave_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
self.assertEqual("SLAVEINFO", slave_info)
calls.append(pool_remove_from_aggregate)
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
pool_remove_from_aggregate)
self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertIn(pool_remove_from_aggregate, calls)
def test_add_to_aggregate_for_first_host_sets_metadata(self):
def fake_init_pool(id, name):
fake_init_pool.called = True
self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
aggregate = self._aggregate_setup()
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_init_pool.called)
self.assertThat(self.fake_metadata,
matchers.DictMatches(result['metadetails']))
def test_join_slave(self):
# Ensure join_slave gets called when the request gets to master.
def fake_join_slave(id, compute_uuid, host, url, user, password):
fake_join_slave.called = True
self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
dict(compute_uuid='fake_uuid',
url='fake_url',
user='fake_user',
passwd='fake_pass',
xenhost_uuid='fake_uuid'))
self.assertTrue(fake_join_slave.called)
def test_add_to_aggregate_first_host(self):
def fake_pool_set_name_label(self, session, pool_ref, name):
fake_pool_set_name_label.called = True
self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
fake_pool_set_name_label)
self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
metadata = {'availability_zone': 'fake_zone',
pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.CREATED}
aggregate = objects.Aggregate()
aggregate.name = 'fake_aggregate'
aggregate.metadata = dict(metadata)
aggregate.create(self.context)
aggregate.add_host('host')
self.assertEqual(["host"], aggregate.hosts)
self.assertEqual(metadata, aggregate.metadata)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
self.assertTrue(fake_pool_set_name_label.called)
def test_remove_from_aggregate_called(self):
def fake_remove_from_aggregate(context, aggregate, host):
fake_remove_from_aggregate.called = True
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
fake_remove_from_aggregate)
self.conn.remove_from_aggregate(None, None, None)
self.assertTrue(fake_remove_from_aggregate.called)
def test_remove_from_empty_aggregate(self):
result = self._aggregate_setup()
self.assertRaises(exception.InvalidAggregateAction,
self.conn._pool.remove_from_aggregate,
self.context, result, "test_host")
def test_remove_slave(self):
# Ensure eject slave gets called.
def fake_eject_slave(id, compute_uuid, host_uuid):
fake_eject_slave.called = True
self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
self.fake_metadata['host2'] = 'fake_host2_uuid'
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
self.assertTrue(fake_eject_slave.called)
def test_remove_master_solo(self):
# Ensure metadata are cleared after removal.
def fake_clear_pool(id):
fake_clear_pool.called = True
self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
aggregate = self._aggregate_setup(metadata=self.fake_metadata)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_clear_pool.called)
self.assertThat({'availability_zone': 'fake_zone',
pool_states.POOL_FLAG: 'XenAPI',
pool_states.KEY: pool_states.ACTIVE},
matchers.DictMatches(result['metadetails']))
    def test_remove_master_non_empty_pool(self):
        # Ensure InvalidAggregateAction is raised when trying to remove the
        # master from a pool that still contains other hosts.
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.assertRaises(exception.InvalidAggregateAction,
self.conn._pool.remove_from_aggregate,
self.context, aggregate, "host")
def _aggregate_setup(self, aggr_name='fake_aggregate',
aggr_zone='fake_zone',
aggr_state=pool_states.CREATED,
hosts=['host'], metadata=None):
aggregate = objects.Aggregate()
aggregate.name = aggr_name
aggregate.metadata = {'availability_zone': aggr_zone,
pool_states.POOL_FLAG: 'XenAPI',
pool_states.KEY: aggr_state,
}
if metadata:
aggregate.metadata.update(metadata)
aggregate.create(self.context)
for aggregate_host in hosts:
aggregate.add_host(aggregate_host)
return aggregate
def test_add_host_to_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateAction is raised when adding host while
aggregate is not ready.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
ex = self.assertRaises(exception.InvalidAggregateAction,
self.conn.add_to_aggregate, self.context,
aggregate, 'host')
self.assertIn('setup in progress', str(ex))
def test_add_host_to_aggregate_invalid_dismissed_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
deleted.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
ex = self.assertRaises(exception.InvalidAggregateAction,
self.conn.add_to_aggregate, self.context,
aggregate, 'fake_host')
self.assertIn('aggregate deleted', str(ex))
def test_add_host_to_aggregate_invalid_error_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
in error.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
ex = self.assertRaises(exception.InvalidAggregateAction,
self.conn.add_to_aggregate, self.context,
aggregate, 'fake_host')
self.assertIn('aggregate in error', str(ex))
def test_remove_host_from_aggregate_error(self):
# Ensure we can remove a host from an aggregate even if in error.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
# let's mock the fact that the aggregate is ready!
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
db.aggregate_metadata_add(self.context, aggr['id'], metadata)
for aggregate_host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], aggregate_host)
# let's mock the fact that the aggregate is in error!
expected = self.api.remove_host_from_aggregate(self.context,
aggr['id'],
values[fake_zone][0])
self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
self.assertEqual(expected['metadata'][pool_states.KEY],
pool_states.ACTIVE)
def test_remove_host_from_aggregate_invalid_dismissed_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
deleted.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.remove_from_aggregate, self.context,
aggregate, 'fake_host')
def test_remove_host_from_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
changing.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.remove_from_aggregate, self.context,
aggregate, 'fake_host')
def test_add_aggregate_host_raise_err(self):
# Ensure the undo operation works correctly on add.
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
raise exception.AggregateError(
aggregate_id='', action='', reason='')
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
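        # The stubbed driver raises AggregateError, so add_aggregate_host is
        # expected to re-raise it and roll the aggregate back into the ERROR
        # state with an empty host list (checked below).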
self.assertRaises(exception.AggregateError,
self.compute.add_aggregate_host,
self.context, host="fake_host",
aggregate=jsonutils.to_primitive(self.aggr),
slave_info=None)
        expected = db.aggregate_get(self.context, self.aggr['id'])
        self.assertEqual(expected['metadetails'][pool_states.KEY],
                         pool_states.ERROR)
        self.assertEqual(expected['hosts'], [])
class MockComputeAPI(object):
def __init__(self):
self._mock_calls = []
def add_aggregate_host(self, ctxt, aggregate,
host_param, host, slave_info):
self._mock_calls.append((
self.add_aggregate_host, ctxt, aggregate,
host_param, host, slave_info))
def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
host, slave_info):
self._mock_calls.append((
self.remove_aggregate_host, ctxt, aggregate_id,
host_param, host, slave_info))
class StubDependencies(object):
"""Stub dependencies for ResourcePool."""
def __init__(self):
self.compute_rpcapi = MockComputeAPI()
def _is_hv_pool(self, *_ignore):
return True
def _get_metadata(self, *_ignore):
return {
pool_states.KEY: {},
'master_compute': 'master'
}
def _create_slave_info(self, *ignore):
return "SLAVE_INFO"
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
"""A ResourcePool, use stub dependencies."""
class HypervisorPoolTestCase(test.NoDBTestCase):
fake_aggregate = {
'id': 98,
'hosts': [],
'metadata': {
'master_compute': 'master',
pool_states.POOL_FLAG: {},
pool_states.KEY: {}
}
}
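    # The local node acts as a pool slave here, so ResourcePool is expected to
    # forward add/remove requests to the 'master_compute' host over RPC;
    # MockComputeAPI simply records those RPC calls for the assertions below.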
def test_slave_asks_master_to_add_slave_to_pool(self):
slave = ResourcePoolWithStubs()
slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.add_aggregate_host,
"CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
"slave", "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
def test_slave_asks_master_to_remove_slave_from_pool(self):
slave = ResourcePoolWithStubs()
slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.remove_aggregate_host,
"CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
class SwapXapiHostTestCase(test.NoDBTestCase):
def test_swapping(self):
self.assertEqual(
"http://otherserver:8765/somepath",
pool.swap_xapi_host(
"http://someserver:8765/somepath", 'otherserver'))
def test_no_port(self):
self.assertEqual(
"http://otherserver/somepath",
pool.swap_xapi_host(
"http://someserver/somepath", 'otherserver'))
def test_no_path(self):
self.assertEqual(
"http://otherserver",
pool.swap_xapi_host(
"http://someserver", 'otherserver'))
class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for live_migration."""
def setUp(self):
super(XenAPILiveMigrateTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host')
db_fakes.stub_out_db_instance_api(self.stubs)
self.context = context.get_admin_context()
def test_live_migration_calls_vmops(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_live_migrate(context, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data):
fake_live_migrate.called = True
self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
self.conn.live_migration(None, None, None, None, None)
self.assertTrue(fake_live_migrate.called)
def test_pre_live_migration(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.pre_live_migration(None, None, None, None, None)
def test_post_live_migration_at_destination(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
fake_instance = {"name": "name"}
fake_network_info = "network_info"
def fake_fw(instance, network_info):
self.assertEqual(instance, fake_instance)
self.assertEqual(network_info, fake_network_info)
fake_fw.call_count += 1
def fake_create_kernel_and_ramdisk(context, session, instance,
name_label):
return "fake-kernel-file", "fake-ramdisk-file"
fake_fw.call_count = 0
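        # fake_fw stands in for all three firewall-driver hooks stubbed below,
        # so a successful run drives call_count to 3 (asserted at the end).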
_vmops = self.conn._vmops
self.stubs.Set(_vmops.firewall_driver,
'setup_basic_filtering', fake_fw)
self.stubs.Set(_vmops.firewall_driver,
'prepare_instance_filter', fake_fw)
self.stubs.Set(_vmops.firewall_driver,
'apply_instance_filter', fake_fw)
self.stubs.Set(vm_utils, "create_kernel_and_ramdisk",
fake_create_kernel_and_ramdisk)
def fake_get_vm_opaque_ref(instance):
fake_get_vm_opaque_ref.called = True
self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref)
fake_get_vm_opaque_ref.called = False
def fake_strip_base_mirror_from_vdis(session, vm_ref):
fake_strip_base_mirror_from_vdis.called = True
self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis",
fake_strip_base_mirror_from_vdis)
fake_strip_base_mirror_from_vdis.called = False
self.conn.post_live_migration_at_destination(None, fake_instance,
fake_network_info, None)
self.assertEqual(fake_fw.call_count, 3)
self.assertTrue(fake_get_vm_opaque_ref.called)
self.assertTrue(fake_strip_base_mirror_from_vdis.called)
def test_check_can_live_migrate_destination_with_block_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
expected = {'block_migration': True,
'migrate_data': {
'migrate_send_data': "fake_migrate_data",
'destination_sr_ref': 'asdf'
}
}
result = self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'},
{}, {},
True, False)
self.assertEqual(expected, result)
def test_check_live_migrate_destination_verifies_ip(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
for pif_ref in xenapi_fake.get_all('PIF'):
pif_rec = xenapi_fake.get_record('PIF', pif_ref)
pif_rec['IP'] = ''
pif_rec['IPv6'] = ''
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def test_check_can_live_migrate_destination_block_migration_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def _add_default_live_migrate_stubs(self, conn):
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return []
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
def fake_lookup_kernel_ramdisk(session, vm):
return ("fake_PV_kernel", "fake_PV_ramdisk")
self.stubs.Set(conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
self.stubs.Set(conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
fake_lookup_kernel_ramdisk)
def test_check_can_live_migrate_source_with_block_migrate(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result)
def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_make_plugin_call(plugin, method, **args):
return "true"
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
fake_make_plugin_call)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result)
def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_make_plugin_call(plugin, method, **args):
return {'returncode': 'error', 'message': 'Plugin not found'}
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
fake_make_plugin_call)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context, {'host': 'host'},
{})
def test_check_can_live_migrate_source_with_block_migrate_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context,
{'host': 'host'},
dest_check_data)
def test_check_can_live_migrate_works(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [dict(test_aggregate.fake_aggregate,
metadetails={"host": "test_host_uuid"})]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'}, False, False)
def test_check_can_live_migrate_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [dict(test_aggregate.fake_aggregate,
metadetails={"dest_other": "test_host_uuid"})]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'}, None, None)
def test_live_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
self.conn.live_migration(self.conn, None, None, post_method, None)
self.assertTrue(post_method.called, "post_method.called")
def test_live_migration_on_failure(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def fake_call_xenapi(*args):
raise NotImplementedError()
self.stubs.Set(self.conn._vmops._session, "call_xenapi",
fake_call_xenapi)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
self.assertRaises(NotImplementedError, self.conn.live_migration,
self.conn, None, None, None, recover_method)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_calls_post_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
# pass block_migration = True and migrate data
migrate_data = {"destination_sr_ref": "foo",
"migrate_send_data": "bar"}
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
def test_live_migration_block_cleans_srs(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(context, instance):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_forget_sr(context, instance):
fake_forget_sr.called = True
self.stubs.Set(volume_utils, "forget_sr",
fake_forget_sr)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
migrate_data = {"destination_sr_ref": "foo",
"migrate_send_data": "bar"}
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
self.assertTrue(fake_forget_sr.called, "forget_sr.called")
def test_live_migration_with_block_migration_raises_invalid_param(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
# pass block_migration = True and no migrate data
self.assertRaises(exception.InvalidParameterValue,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, None)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_with_block_migration_fails_migrate_send(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
# pass block_migration = True and migrate data
migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
self.assertRaises(exception.MigrationError,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, migrate_data)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migrate_block_migration_xapi_call_parameters(self):
fake_vdi_map = object()
class Session(xenapi_fake.SessionBase):
def VM_migrate_send(self_, session, vmref, migrate_data, islive,
vdi_map, vif_map, options):
self.assertEqual('SOMEDATA', migrate_data)
self.assertEqual(fake_vdi_map, vdi_map)
stubs.stubout_session(self.stubs, Session)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(conn)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
return fake_vdi_map
self.stubs.Set(conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
def dummy_callback(*args, **kwargs):
pass
conn.live_migration(
self.context, instance=dict(name='ignore'), dest=None,
post_method=dummy_callback, recover_method=dummy_callback,
block_migration="SOMEDATA",
migrate_data=dict(migrate_send_data='SOMEDATA',
destination_sr_ref="TARGET_SR_OPAQUE_REF"))
def test_live_migrate_pool_migration_xapi_call_parameters(self):
class Session(xenapi_fake.SessionBase):
def VM_pool_migrate(self_, session, vm_ref, host_ref, options):
self.assertEqual("fake_ref", host_ref)
self.assertEqual({"live": "true"}, options)
raise IOError()
stubs.stubout_session(self.stubs, Session)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(conn)
def fake_get_host_opaque_ref(context, destination):
return "fake_ref"
self.stubs.Set(conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def dummy_callback(*args, **kwargs):
pass
self.assertRaises(IOError, conn.live_migration,
self.context, instance=dict(name='ignore'), dest=None,
post_method=dummy_callback, recover_method=dummy_callback,
block_migration=False, migrate_data={})
def test_generate_vdi_map(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = "fake_vm_ref"
def fake_find_sr(_session):
self.assertEqual(conn._session, _session)
return "source_sr_ref"
self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
self.assertEqual(conn._session, _session)
self.assertEqual(vm_ref, _vm_ref)
self.assertEqual("source_sr_ref", _sr_ref)
return ["vdi0", "vdi1"]
self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
fake_get_instance_vdis_for_sr)
result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)
self.assertEqual({"vdi0": "dest_sr_ref",
"vdi1": "dest_sr_ref"}, result)
def test_rollback_live_migration_at_destination(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(conn, "destroy") as mock_destroy:
conn.rollback_live_migration_at_destination("context",
"instance", [], None)
self.assertFalse(mock_destroy.called)
class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(XenAPIInjectMetadataTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.xenstore = dict(persist={}, ephem={})
self.called_fake_get_vm_opaque_ref = False
def fake_get_vm_opaque_ref(inst, instance):
self.called_fake_get_vm_opaque_ref = True
if instance["uuid"] == "not_found":
raise exception.NotFound
self.assertEqual(instance, {'uuid': 'fake'})
return 'vm_ref'
def fake_add_to_param_xenstore(inst, vm_ref, key, val):
self.assertEqual(vm_ref, 'vm_ref')
self.xenstore['persist'][key] = val
def fake_remove_from_param_xenstore(inst, vm_ref, key):
self.assertEqual(vm_ref, 'vm_ref')
if key in self.xenstore['persist']:
del self.xenstore['persist'][key]
def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
self.assertEqual(instance, {'uuid': 'fake'})
self.assertEqual(vm_ref, 'vm_ref')
self.xenstore['ephem'][path] = jsonutils.dumps(value)
def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
self.assertEqual(instance, {'uuid': 'fake'})
self.assertEqual(vm_ref, 'vm_ref')
if path in self.xenstore['ephem']:
del self.xenstore['ephem'][path]
self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
fake_get_vm_opaque_ref)
self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
fake_add_to_param_xenstore)
self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
fake_remove_from_param_xenstore)
self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
fake_write_to_xenstore)
self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
fake_delete_from_xenstore)
def test_inject_instance_metadata(self):
# Add some system_metadata to ensure it doesn't get added
# to xenstore
instance = dict(metadata=[{'key': 'a', 'value': 1},
{'key': 'b', 'value': 2},
{'key': 'c', 'value': 3},
# Check xenstore key sanitizing
{'key': 'hi.there', 'value': 4},
{'key': 'hi!t.e/e', 'value': 5}],
# Check xenstore key sanitizing
system_metadata=[{'key': 'sys_a', 'value': 1},
{'key': 'sys_b', 'value': 2},
{'key': 'sys_c', 'value': 3}],
uuid='fake')
self.conn._vmops._inject_instance_metadata(instance, 'vm_ref')
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/hi_there': '4',
'vm-data/user-metadata/hi_t_e_e': '5',
},
'ephem': {},
})
def test_change_instance_metadata_add(self):
# Test XenStore key sanitizing here, too.
diff = {'test.key': ['+', 4]}
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/test_key': '4',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/test_key': '4',
},
})
def test_change_instance_metadata_update(self):
diff = dict(b=['+', 4])
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '4',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '4',
'vm-data/user-metadata/c': '3',
},
})
def test_change_instance_metadata_delete(self):
diff = dict(b=['-'])
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/c': '3',
},
})
def test_change_instance_metadata_not_found(self):
instance = {'uuid': 'not_found'}
self.conn._vmops.change_instance_metadata(instance, "fake_diff")
self.assertTrue(self.called_fake_get_vm_opaque_ref)
class XenAPISessionTestCase(test.NoDBTestCase):
def _get_mock_xapisession(self, software_version):
class MockXapiSession(xenapi_session.XenAPISession):
def __init__(_ignore):
"Skip the superclass's dirty init"
def _get_software_version(_ignore):
return software_version
return MockXapiSession()
def test_local_session(self):
session = self._get_mock_xapisession({})
session.is_local_connection = True
session.XenAPI = self.mox.CreateMockAnything()
session.XenAPI.xapi_local().AndReturn("local_connection")
self.mox.ReplayAll()
self.assertEqual("local_connection",
session._create_session("unix://local"))
def test_remote_session(self):
session = self._get_mock_xapisession({})
session.is_local_connection = False
session.XenAPI = self.mox.CreateMockAnything()
session.XenAPI.Session("url").AndReturn("remote_connection")
self.mox.ReplayAll()
self.assertEqual("remote_connection", session._create_session("url"))
def test_get_product_version_product_brand_does_not_fail(self):
session = self._get_mock_xapisession({
'build_number': '0',
'date': '2012-08-03',
'hostname': 'komainu',
'linux': '3.2.0-27-generic',
'network_backend': 'bridge',
'platform_name': 'XCP_Kronos',
'platform_version': '1.6.0',
'xapi': '1.3',
'xen': '4.1.2',
'xencenter_max': '1.10',
'xencenter_min': '1.10'
})
self.assertEqual(
((1, 6, 0), None),
session._get_product_version_and_brand()
)
def test_get_product_version_product_brand_xs_6(self):
session = self._get_mock_xapisession({
'product_brand': 'XenServer',
'product_version': '6.0.50',
'platform_version': '0.0.1'
})
self.assertEqual(
((6, 0, 50), 'XenServer'),
session._get_product_version_and_brand()
)
def test_verify_plugin_version_same(self):
session = self._get_mock_xapisession({})
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("2.4")
self.mox.ReplayAll()
session._verify_plugin_version()
def test_verify_plugin_version_compatible(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("2.5")
self.mox.ReplayAll()
session._verify_plugin_version()
def test_verify_plugin_version_bad_maj(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("3.0")
self.mox.ReplayAll()
self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
def test_verify_plugin_version_bad_min(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("2.3")
self.mox.ReplayAll()
self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
def test_verify_current_version_matches(self):
session = self._get_mock_xapisession({})
# Import the plugin to extract its version
path = os.path.dirname(__file__)
rel_path_elem = "../../../../plugins/xenserver/xenapi/etc/xapi.d/" \
"plugins/nova_plugin_version"
for elem in rel_path_elem.split('/'):
path = os.path.join(path, elem)
path = os.path.realpath(path)
plugin_version = None
with open(path) as plugin_file:
for line in plugin_file:
if "PLUGIN_VERSION = " in line:
plugin_version = line.strip()[17:].strip('"')
self.assertEqual(session.PLUGIN_REQUIRED_VERSION,
plugin_version)
class XenAPIFakeTestCase(test.NoDBTestCase):
def test_query_matches(self):
record = {'a': '1', 'b': '2', 'c_d': '3'}
tests = {'field "a"="1"': True,
'field "b"="2"': True,
'field "b"="4"': False,
'not field "b"="4"': True,
'field "a"="1" and field "b"="4"': False,
'field "a"="1" or field "b"="4"': True,
'field "c__d"="3"': True,
'field \'b\'=\'2\'': True,
}
for query in tests.keys():
expected = tests[query]
fail_msg = "for test '%s'" % query
self.assertEqual(xenapi_fake._query_matches(record, query),
expected, fail_msg)
def test_query_bad_format(self):
record = {'a': '1', 'b': '2', 'c': '3'}
tests = ['"a"="1" or "b"="4"',
'a=1',
]
for query in tests:
fail_msg = "for test '%s'" % query
self.assertFalse(xenapi_fake._query_matches(record, query),
fail_msg)
| 42.388998 | 79 | 0.599443 |
a69d585fab8eb70c138a43596cbea29f0663dff6 | 32,340 | py | Python | saleor/graphql/product/types/products.py | GvS666/saleor | f482f7162a966acc58057fdff6fd7eae68aa4a21 | ["CC-BY-4.0"] | 1 | 2020-11-13T14:25:51.000Z | 2020-11-13T14:25:51.000Z | saleor/graphql/product/types/products.py | GvS666/saleor | f482f7162a966acc58057fdff6fd7eae68aa4a21 | ["CC-BY-4.0"] | 13 | 2021-02-02T22:59:20.000Z | 2022-03-12T00:36:53.000Z | saleor/graphql/product/types/products.py | GvS666/saleor | f482f7162a966acc58057fdff6fd7eae68aa4a21 | ["CC-BY-4.0"] | null | null | null | from dataclasses import asdict
from typing import List, Union
import graphene
import graphene_django_optimizer as gql_optimizer
from django.db.models import Prefetch
from graphene import relay
from graphene_federation import key
from graphql.error import GraphQLError
from ....core.permissions import ProductPermissions
from ....product import models
from ....product.templatetags.product_images import (
get_product_image_thumbnail,
get_thumbnail,
)
from ....product.utils import calculate_revenue_for_variant
from ....product.utils.availability import (
get_product_availability,
get_variant_availability,
)
from ....product.utils.costs import get_margin_for_variant, get_product_costs_data
from ....warehouse import models as stock_models
from ....warehouse.availability import (
get_available_quantity,
get_available_quantity_for_customer,
get_quantity_allocated,
is_product_in_stock,
is_variant_in_stock,
)
from ...core.connection import CountableDjangoObjectType
from ...core.enums import ReportingPeriod, TaxRateType
from ...core.fields import FilterInputConnectionField, PrefetchingConnectionField
from ...core.resolvers import resolve_meta, resolve_private_meta
from ...core.types import (
Image,
MetadataObjectType,
Money,
MoneyRange,
TaxedMoney,
TaxedMoneyRange,
TaxType,
)
from ...decorators import permission_required
from ...translations.fields import TranslationField
from ...translations.types import (
CategoryTranslation,
CollectionTranslation,
ProductTranslation,
ProductVariantTranslation,
)
from ...utils import get_database_id, reporting_period_to_date
from ...warehouse.types import Stock
from ..filters import AttributeFilterInput
from ..resolvers import resolve_attributes
from .attributes import Attribute, SelectedAttribute
from .digital_contents import DigitalContent
def prefetch_products(info, *_args, **_kwargs):
"""Prefetch products visible to the current user.
Can be used with models that have the `products` relationship. The queryset
of products being prefetched is filtered based on permissions of the
requesting user, to restrict access to unpublished products from non-staff
users.
"""
user = info.context.user
qs = models.Product.objects.visible_to_user(user)
return Prefetch(
"products",
queryset=gql_optimizer.query(qs, info),
to_attr="prefetched_products",
)
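# Note added for clarity (illustrative, not part of the original module): this helper is
# passed as a `prefetch_related` hint to `gql_optimizer.field` further down in this file
# (e.g. on ProductType.products and Category.products), so optimized queries attach the
# permission-filtered products to `<instance>.prefetched_products`, which the resolvers
# then reuse instead of re-querying:
#
#     products = gql_optimizer.field(
#         PrefetchingConnectionField(Product, description="List of products of this type."),
#         prefetch_related=prefetch_products,
#     )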
def prefetch_products_collection_sorted(info, *_args, **_kwargs):
user = info.context.user
qs = models.Product.objects.collection_sorted(user)
return Prefetch(
"products",
queryset=gql_optimizer.query(qs, info),
to_attr="prefetched_products",
)
def resolve_attribute_list(
instance: Union[models.Product, models.ProductVariant], *, user
) -> List[SelectedAttribute]:
"""Resolve attributes from a product into a list of `SelectedAttribute`s.
Note: you have to prefetch the below M2M fields.
- product_type -> attribute[rel] -> [rel]assignments -> values
- product_type -> attribute[rel] -> attribute
"""
resolved_attributes = []
attributes_qs = None
# Retrieve the product type
if isinstance(instance, models.Product):
product_type = instance.product_type
product_type_attributes_assoc_field = "attributeproduct"
assigned_attribute_instance_field = "productassignments"
assigned_attribute_instance_filters = {"product_id": instance.pk}
if hasattr(product_type, "storefront_attributes"):
attributes_qs = product_type.storefront_attributes # type: ignore
elif isinstance(instance, models.ProductVariant):
product_type = instance.product.product_type
product_type_attributes_assoc_field = "attributevariant"
assigned_attribute_instance_field = "variantassignments"
assigned_attribute_instance_filters = {"variant_id": instance.pk}
else:
raise AssertionError(f"{instance.__class__.__name__} is unsupported")
# Retrieve all the product attributes assigned to this product type
if not attributes_qs:
attributes_qs = getattr(product_type, product_type_attributes_assoc_field)
attributes_qs = attributes_qs.get_visible_to_user(user)
# An empty QuerySet for unresolved values
empty_qs = models.AttributeValue.objects.none()
# Goes through all the attributes assigned to the product type
# The assigned values are returned as a QuerySet, but will assign a
# dummy empty QuerySet if no values are assigned to the given instance.
for attr_data_rel in attributes_qs:
attr_instance_data = getattr(attr_data_rel, assigned_attribute_instance_field)
# Retrieve the instance's associated data
attr_data = attr_instance_data.filter(**assigned_attribute_instance_filters)
attr_data = attr_data.first()
# Return the instance's attribute values if the assignment was found,
# otherwise it sets the values as an empty QuerySet
values = attr_data.values.all() if attr_data is not None else empty_qs
resolved_attributes.append(
SelectedAttribute(attribute=attr_data_rel.attribute, values=values)
)
return resolved_attributes
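# Note added for clarity (illustrative, not part of the original module): the prefetch
# shape required by the docstring above is the same one used later in this file as the
# resolver hint on Product.resolve_attributes, e.g.:
#
#     Prefetch(
#         "product_type__attributeproduct",
#         queryset=models.AttributeProduct.objects.filter(
#             attribute__visible_in_storefront=True
#         ).prefetch_related("productassignments__values", "attribute"),
#         to_attr="storefront_attributes",
#     )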
class Margin(graphene.ObjectType):
start = graphene.Int()
stop = graphene.Int()
class BasePricingInfo(graphene.ObjectType):
on_sale = graphene.Boolean(description="Whether it is in sale or not.")
discount = graphene.Field(
TaxedMoney, description="The discount amount if in sale (null otherwise)."
)
discount_local_currency = graphene.Field(
TaxedMoney, description="The discount amount in the local currency."
)
class VariantPricingInfo(BasePricingInfo):
discount_local_currency = graphene.Field(
TaxedMoney, description="The discount amount in the local currency."
)
price = graphene.Field(
TaxedMoney, description="The price, with any discount subtracted."
)
price_undiscounted = graphene.Field(
TaxedMoney, description="The price without any discount."
)
price_local_currency = graphene.Field(
TaxedMoney, description="The discounted price in the local currency."
)
class Meta:
description = "Represents availability of a variant in the storefront."
class ProductPricingInfo(BasePricingInfo):
price_range = graphene.Field(
TaxedMoneyRange,
description="The discounted price range of the product variants.",
)
price_range_undiscounted = graphene.Field(
TaxedMoneyRange,
description="The undiscounted price range of the product variants.",
)
price_range_local_currency = graphene.Field(
TaxedMoneyRange,
description=(
"The discounted price range of the product variants "
"in the local currency."
),
)
class Meta:
description = "Represents availability of a product in the storefront."
@key(fields="id")
class ProductVariant(CountableDjangoObjectType, MetadataObjectType):
quantity = graphene.Int(
required=True,
description="Quantity of a product in the store's possession, "
"including the allocated stock that is waiting for shipment.",
deprecation_reason="This field will be removed in Saleor 2.11. "
"Use the stock field instead.",
)
quantity_allocated = graphene.Int(
required=False,
description="Quantity allocated for orders",
deprecation_reason="This field will be removed in Saleor 2.11. "
"Use the stock field instead.",
)
stock_quantity = graphene.Int(
required=True,
description="Quantity of a product available for sale.",
deprecation_reason="This field will be removed in Saleor 2.11. "
"Use the stock field instead.",
)
price_override = graphene.Field(
Money,
description=(
"Override the base price of a product if necessary. A value of `null` "
"indicates that the default product price is used."
),
)
pricing = graphene.Field(
VariantPricingInfo,
description=(
"Lists the storefront variant's pricing, the current price and discounts, "
"only meant for displaying."
),
)
is_available = graphene.Boolean(
description="Whether the variant is in stock and visible or not.",
deprecation_reason="This field will be removed in Saleor 2.11. "
"Use the stock field instead.",
)
attributes = gql_optimizer.field(
graphene.List(
graphene.NonNull(SelectedAttribute),
required=True,
description="List of attributes assigned to this variant.",
)
)
cost_price = graphene.Field(Money, description="Cost price of the variant.")
margin = graphene.Int(description="Gross margin percentage value.")
quantity_ordered = graphene.Int(description="Total quantity ordered.")
revenue = graphene.Field(
TaxedMoney,
period=graphene.Argument(ReportingPeriod),
description=(
"Total revenue generated by a variant in given period of time. Note: this "
"field should be queried using `reportProductSales` query as it uses "
"optimizations suitable for such calculations."
),
)
images = gql_optimizer.field(
graphene.List(
lambda: ProductImage, description="List of images for the product variant."
),
model_field="images",
)
translation = TranslationField(
ProductVariantTranslation, type_name="product variant"
)
digital_content = gql_optimizer.field(
graphene.Field(
DigitalContent, description="Digital content for the product variant."
),
model_field="digital_content",
)
stock = gql_optimizer.field(
graphene.Field(
graphene.List(Stock),
description="Stocks for the product variant.",
country=graphene.String(required=False),
)
)
class Meta:
description = (
"Represents a version of a product such as different size or color."
)
only_fields = ["id", "name", "product", "sku", "track_inventory", "weight"]
interfaces = [relay.Node]
model = models.ProductVariant
@staticmethod
def resolve_stock(root: models.ProductVariant, info, country=None):
if country is None:
return gql_optimizer.query(
root.stock.annotate_available_quantity().all(), info
)
return gql_optimizer.query(
root.stock.annotate_available_quantity().for_country(country).all(), info
)
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_digital_content(root: models.ProductVariant, *_args):
return getattr(root, "digital_content", None)
@staticmethod
def resolve_stock_quantity(root: models.ProductVariant, info):
country = info.context.country
try:
stock = stock_models.Stock.objects.get_variant_stock_for_country(
country, root
)
except stock_models.Stock.DoesNotExist:
return 0
return get_available_quantity_for_customer(stock)
@staticmethod
@gql_optimizer.resolver_hints(
prefetch_related=["attributes__values", "attributes__assignment__attribute"]
)
def resolve_attributes(root: models.ProductVariant, info):
return resolve_attribute_list(root, user=info.context.user)
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_margin(root: models.ProductVariant, *_args):
return get_margin_for_variant(root)
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_cost_price(root: models.ProductVariant, *_args):
return root.cost_price
@staticmethod
def resolve_price(root: models.ProductVariant, *_args):
return root.base_price
@staticmethod
@gql_optimizer.resolver_hints(
prefetch_related=("product",), only=["price_override_amount", "currency"]
)
def resolve_pricing(root: models.ProductVariant, info):
context = info.context
availability = get_variant_availability(
root,
context.discounts,
context.country,
context.currency,
extensions=context.extensions,
)
return VariantPricingInfo(**asdict(availability))
@staticmethod
def resolve_is_available(root: models.ProductVariant, info):
country = info.context.country
return is_variant_in_stock(root, country)
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_price_override(root: models.ProductVariant, *_args):
return root.price_override
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_quantity(root: models.ProductVariant, info):
return get_available_quantity(root, info.context.country)
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_quantity_ordered(root: models.ProductVariant, *_args):
# This field is added through annotation when using the
# `resolve_report_product_sales` resolver.
return getattr(root, "quantity_ordered", None)
    @staticmethod
    @permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_quantity_allocated(root: models.ProductVariant, info):
country = info.context.country
return get_quantity_allocated(root, country)
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_revenue(root: models.ProductVariant, *_args, period):
start_date = reporting_period_to_date(period)
return calculate_revenue_for_variant(root, start_date)
@staticmethod
def resolve_images(root: models.ProductVariant, *_args):
return root.images.all()
@classmethod
def get_node(cls, info, id):
user = info.context.user
visible_products = models.Product.objects.visible_to_user(user).values_list(
"pk", flat=True
)
qs = cls._meta.model.objects.filter(product__id__in=visible_products)
return cls.maybe_optimize(info, qs, id)
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_private_meta(root, _info):
return resolve_private_meta(root, _info)
@staticmethod
def resolve_meta(root, _info):
return resolve_meta(root, _info)
@staticmethod
def __resolve_reference(root, _info, **_kwargs):
return graphene.Node.get_node_from_global_id(_info, root.id)
@key(fields="id")
class Product(CountableDjangoObjectType, MetadataObjectType):
url = graphene.String(
description="The storefront URL for the product.", required=True
)
thumbnail = graphene.Field(
Image,
description="The main thumbnail for a product.",
size=graphene.Argument(graphene.Int, description="Size of thumbnail."),
)
pricing = graphene.Field(
ProductPricingInfo,
description=(
"Lists the storefront product's pricing, the current price and discounts, "
"only meant for displaying."
),
)
is_available = graphene.Boolean(
description="Whether the product is in stock and visible or not."
)
base_price = graphene.Field(Money, description="The product's default base price.")
minimal_variant_price = graphene.Field(
Money, description="The price of the cheapest variant (including discounts)."
)
tax_type = graphene.Field(
TaxType, description="A type of tax. Assigned by enabled tax gateway"
)
attributes = graphene.List(
graphene.NonNull(SelectedAttribute),
required=True,
description="List of attributes assigned to this product.",
)
purchase_cost = graphene.Field(MoneyRange)
margin = graphene.Field(Margin)
image_by_id = graphene.Field(
lambda: ProductImage,
id=graphene.Argument(graphene.ID, description="ID of a product image."),
description="Get a single product image by ID.",
)
variants = gql_optimizer.field(
graphene.List(ProductVariant, description="List of variants for the product."),
model_field="variants",
)
images = gql_optimizer.field(
graphene.List(
lambda: ProductImage, description="List of images for the product."
),
model_field="images",
)
collections = gql_optimizer.field(
graphene.List(
lambda: Collection, description="List of collections for the product."
),
model_field="collections",
)
translation = TranslationField(ProductTranslation, type_name="product")
class Meta:
description = "Represents an individual item for sale in the storefront."
interfaces = [relay.Node]
model = models.Product
only_fields = [
"category",
"charge_taxes",
"description",
"description_json",
"id",
"is_published",
"name",
"slug",
"product_type",
"publication_date",
"seo_description",
"seo_title",
"updated_at",
"weight",
]
@staticmethod
def resolve_tax_type(root: models.Product, info):
tax_data = info.context.extensions.get_tax_code_from_object_meta(root)
return TaxType(tax_code=tax_data.code, description=tax_data.description)
@staticmethod
@gql_optimizer.resolver_hints(prefetch_related="images")
def resolve_thumbnail(root: models.Product, info, *, size=255):
image = root.get_first_image()
if image:
url = get_product_image_thumbnail(image, size, method="thumbnail")
alt = image.alt
return Image(alt=alt, url=info.context.build_absolute_uri(url))
return None
@staticmethod
def resolve_url(root: models.Product, *_args):
return root.get_absolute_url()
@staticmethod
@gql_optimizer.resolver_hints(
prefetch_related=("variants", "collections"),
only=["publication_date", "charge_taxes", "price_amount", "currency", "meta"],
)
def resolve_pricing(root: models.Product, info):
context = info.context
availability = get_product_availability(
root,
context.discounts,
context.country,
context.currency,
context.extensions,
)
return ProductPricingInfo(**asdict(availability))
@staticmethod
@gql_optimizer.resolver_hints(prefetch_related=("variants"))
def resolve_is_available(root: models.Product, info):
country = info.context.country
in_stock = is_product_in_stock(root, country)
return root.is_visible and in_stock
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_base_price(root: models.Product, _info):
return root.price
@staticmethod
@gql_optimizer.resolver_hints(
prefetch_related=("variants", "collections"),
only=["publication_date", "charge_taxes", "price_amount", "currency", "meta"],
)
def resolve_price(root: models.Product, info):
price_range = root.get_price_range(info.context.discounts)
price = info.context.extensions.apply_taxes_to_product(
root, price_range.start, info.context.country
)
return price.net
@staticmethod
@gql_optimizer.resolver_hints(
prefetch_related=[
Prefetch(
"product_type__attributeproduct",
queryset=models.AttributeProduct.objects.filter(
attribute__visible_in_storefront=True
).prefetch_related("productassignments__values", "attribute"),
to_attr="storefront_attributes",
)
]
)
def resolve_attributes(root: models.Product, info):
return resolve_attribute_list(root, user=info.context.user)
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_purchase_cost(root: models.Product, *_args):
purchase_cost, _ = get_product_costs_data(root)
return purchase_cost
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_margin(root: models.Product, *_args):
_, margin = get_product_costs_data(root)
return Margin(margin[0], margin[1])
@staticmethod
def resolve_image_by_id(root: models.Product, info, id):
pk = get_database_id(info, id, ProductImage)
try:
return root.images.get(pk=pk)
except models.ProductImage.DoesNotExist:
raise GraphQLError("Product image not found.")
@staticmethod
@gql_optimizer.resolver_hints(model_field="images")
def resolve_images(root: models.Product, *_args, **_kwargs):
return root.images.all()
@staticmethod
def resolve_variants(root: models.Product, *_args, **_kwargs):
return root.variants.all()
@staticmethod
def resolve_collections(root: models.Product, *_args):
return root.collections.all()
@classmethod
def get_node(cls, info, pk):
if info.context:
qs = cls._meta.model.objects.visible_to_user(info.context.user)
return cls.maybe_optimize(info, qs, pk)
return None
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_private_meta(root, _info):
return resolve_private_meta(root, _info)
@staticmethod
def resolve_meta(root, _info):
return resolve_meta(root, _info)
@staticmethod
def __resolve_reference(root, _info, **_kwargs):
return graphene.Node.get_node_from_global_id(_info, root.id)
@key(fields="id")
class ProductType(CountableDjangoObjectType, MetadataObjectType):
products = gql_optimizer.field(
PrefetchingConnectionField(
Product, description="List of products of this type."
),
prefetch_related=prefetch_products,
)
tax_rate = TaxRateType(description="A type of tax rate.")
tax_type = graphene.Field(
TaxType, description="A type of tax. Assigned by enabled tax gateway"
)
variant_attributes = graphene.List(
Attribute, description="Variant attributes of that product type."
)
product_attributes = graphene.List(
Attribute, description="Product attributes of that product type."
)
available_attributes = gql_optimizer.field(
FilterInputConnectionField(Attribute, filter=AttributeFilterInput())
)
class Meta:
description = (
"Represents a type of product. It defines what attributes are available to "
"products of this type."
)
interfaces = [relay.Node]
model = models.ProductType
only_fields = [
"has_variants",
"id",
"is_digital",
"is_shipping_required",
"name",
"slug",
"weight",
"tax_type",
]
@staticmethod
def resolve_tax_type(root: models.ProductType, info):
tax_data = info.context.extensions.get_tax_code_from_object_meta(root)
return TaxType(tax_code=tax_data.code, description=tax_data.description)
@staticmethod
def resolve_tax_rate(root: models.ProductType, _info, **_kwargs):
# FIXME this resolver should be dropped after we drop tax_rate from API
if not hasattr(root, "meta"):
return None
tax = root.meta.get("taxes", {}).get("vatlayer", {})
return tax.get("code")
@staticmethod
@gql_optimizer.resolver_hints(
prefetch_related="product_attributes__attributeproduct"
)
def resolve_product_attributes(root: models.ProductType, *_args, **_kwargs):
return root.product_attributes.product_attributes_sorted().all()
@staticmethod
@gql_optimizer.resolver_hints(
prefetch_related="variant_attributes__attributevariant"
)
def resolve_variant_attributes(root: models.ProductType, *_args, **_kwargs):
return root.variant_attributes.variant_attributes_sorted().all()
@staticmethod
def resolve_products(root: models.ProductType, info, **_kwargs):
if hasattr(root, "prefetched_products"):
return root.prefetched_products # type: ignore
qs = root.products.visible_to_user(info.context.user)
return gql_optimizer.query(qs, info)
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_available_attributes(root: models.ProductType, info, **kwargs):
qs = models.Attribute.objects.get_unassigned_attributes(root.pk)
return resolve_attributes(info, qs=qs, **kwargs)
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_private_meta(root, _info):
return resolve_private_meta(root, _info)
@staticmethod
def resolve_meta(root, _info):
return resolve_meta(root, _info)
@staticmethod
def __resolve_reference(root, _info, **_kwargs):
return graphene.Node.get_node_from_global_id(_info, root.id)
@key(fields="id")
class Collection(CountableDjangoObjectType, MetadataObjectType):
products = gql_optimizer.field(
PrefetchingConnectionField(
Product, description="List of products in this collection."
),
prefetch_related=prefetch_products_collection_sorted,
)
background_image = graphene.Field(
Image, size=graphene.Int(description="Size of the image.")
)
translation = TranslationField(CollectionTranslation, type_name="collection")
class Meta:
description = "Represents a collection of products."
only_fields = [
"description",
"description_json",
"id",
"is_published",
"name",
"publication_date",
"seo_description",
"seo_title",
"slug",
]
interfaces = [relay.Node]
model = models.Collection
@staticmethod
def resolve_background_image(root: models.Collection, info, size=None, **_kwargs):
if root.background_image:
return Image.get_adjusted(
image=root.background_image,
alt=root.background_image_alt,
size=size,
rendition_key_set="background_images",
info=info,
)
@staticmethod
def resolve_products(root: models.Collection, info, **_kwargs):
if hasattr(root, "prefetched_products"):
return root.prefetched_products # type: ignore
qs = root.products.collection_sorted(info.context.user)
return gql_optimizer.query(qs, info)
@classmethod
def get_node(cls, info, id):
if info.context:
user = info.context.user
qs = cls._meta.model.objects.visible_to_user(user)
return cls.maybe_optimize(info, qs, id)
return None
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_private_meta(root, _info):
return resolve_private_meta(root, _info)
@staticmethod
def resolve_meta(root, _info):
return resolve_meta(root, _info)
@staticmethod
def __resolve_reference(root, _info, **_kwargs):
return graphene.Node.get_node_from_global_id(_info, root.id)
@key(fields="id")
class Category(CountableDjangoObjectType, MetadataObjectType):
ancestors = PrefetchingConnectionField(
lambda: Category, description="List of ancestors of the category."
)
products = gql_optimizer.field(
PrefetchingConnectionField(
Product, description="List of products in the category."
),
prefetch_related=prefetch_products,
)
# Deprecated. To remove in #5022
url = graphene.String(description="The storefront's URL for the category.")
children = PrefetchingConnectionField(
lambda: Category, description="List of children of the category."
)
background_image = graphene.Field(
Image, size=graphene.Int(description="Size of the image.")
)
translation = TranslationField(CategoryTranslation, type_name="category")
class Meta:
description = (
"Represents a single category of products. Categories allow to organize "
"products in a tree-hierarchies which can be used for navigation in the "
"storefront."
)
only_fields = [
"description",
"description_json",
"id",
"level",
"name",
"parent",
"seo_description",
"seo_title",
"slug",
]
interfaces = [relay.Node]
model = models.Category
@staticmethod
def resolve_ancestors(root: models.Category, info, **_kwargs):
qs = root.get_ancestors()
return gql_optimizer.query(qs, info)
@staticmethod
def resolve_background_image(root: models.Category, info, size=None, **_kwargs):
if root.background_image:
return Image.get_adjusted(
image=root.background_image,
alt=root.background_image_alt,
size=size,
rendition_key_set="background_images",
info=info,
)
@staticmethod
def resolve_children(root: models.Category, info, **_kwargs):
qs = root.children.all()
return gql_optimizer.query(qs, info)
# Deprecated. To remove in #5022
@staticmethod
def resolve_url(root: models.Category, _info):
return root.get_absolute_url()
@staticmethod
def resolve_products(root: models.Category, info, **_kwargs):
# If the category has no children, we use the prefetched data.
children = root.children.all()
if not children and hasattr(root, "prefetched_products"):
return root.prefetched_products
# Otherwise we want to include products from child categories which
# requires performing additional logic.
tree = root.get_descendants(include_self=True)
qs = models.Product.objects.published()
qs = qs.filter(category__in=tree)
return gql_optimizer.query(qs, info)
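    # Note added for clarity (illustrative example, not part of the original file): for a
    # hypothetical tree such as "Apparel" -> "Shirts", resolving Apparel.products returns
    # published products attached to Apparel itself as well as to Shirts, because the
    # queryset is filtered with `category__in=root.get_descendants(include_self=True)`.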
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_private_meta(root, _info):
return resolve_private_meta(root, _info)
@staticmethod
def resolve_meta(root, _info):
return resolve_meta(root, _info)
@staticmethod
def __resolve_reference(root, _info, **_kwargs):
return graphene.Node.get_node_from_global_id(_info, root.id)
@key(fields="id")
class ProductImage(CountableDjangoObjectType):
url = graphene.String(
required=True,
description="The URL of the image.",
size=graphene.Int(description="Size of the image."),
)
class Meta:
description = "Represents a product image."
only_fields = ["alt", "id", "sort_order"]
interfaces = [relay.Node]
model = models.ProductImage
@staticmethod
def resolve_url(root: models.ProductImage, info, *, size=None):
if size:
url = get_thumbnail(root.image, size, method="thumbnail")
else:
url = root.image.url
return info.context.build_absolute_uri(url)
@staticmethod
def __resolve_reference(root, _info, **_kwargs):
return graphene.Node.get_node_from_global_id(_info, root.id)
class MoveProductInput(graphene.InputObjectType):
product_id = graphene.ID(
description="The ID of the product to move.", required=True
)
sort_order = graphene.Int(
description=(
"The relative sorting position of the product (from -inf to +inf) "
"starting from the first given product's actual position."
)
)
| 35.499451 | 88 | 0.675232 |
f4cd140ec3bbb68040d45159323e479b9d92e426 | 8,490 | py | Python | openselfsup/models/necks.py | bin20192019/OpenSelfSup | 6891da7612b9ddfbd06beb7ffad3592513d190bb | ["Apache-2.0"] | 2 | 2020-06-23T14:28:24.000Z | 2020-06-23T14:28:31.000Z | openselfsup/models/necks.py | Yipeng-Sun/OpenSelfSup | 55af4e9ba1934e1e1c18f208481db97ed507db76 | ["Apache-2.0"] | null | null | null | openselfsup/models/necks.py | Yipeng-Sun/OpenSelfSup | 55af4e9ba1934e1e1c18f208481db97ed507db76 | ["Apache-2.0"] | null | null | null | import torch
import torch.nn as nn
from distutils.version import StrictVersion
from mmcv.cnn import kaiming_init, normal_init
from .registry import NECKS
from .utils import build_norm_layer
@NECKS.register_module
class LinearNeck(nn.Module):
def __init__(self, in_channels, out_channels, with_avg_pool=True):
super(LinearNeck, self).__init__()
self.with_avg_pool = with_avg_pool
if with_avg_pool:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(in_channels, out_channels)
def init_weights(self, init_linear='normal'):
assert init_linear in ['normal', 'kaiming'], \
"Undefined init_linear: {}".format(init_linear)
for m in self.modules():
if isinstance(m, nn.Linear):
if init_linear == 'normal':
normal_init(m, std=0.01)
else:
kaiming_init(m, mode='fan_in', nonlinearity='relu')
elif isinstance(m,
(nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
assert len(x) == 1
if self.with_avg_pool:
x = self.avgpool(x[0])
return [self.fc(x.view(x.size(0), -1))]
@NECKS.register_module
class NonLinearNeckV0(nn.Module):
'''The non-linear neck in ODC
'''
def __init__(self,
in_channels,
hid_channels,
out_channels,
with_avg_pool=True):
super(NonLinearNeckV0, self).__init__()
self.with_avg_pool = with_avg_pool
if with_avg_pool:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.mlp = nn.Sequential(
nn.Linear(in_channels, hid_channels),
nn.BatchNorm1d(hid_channels, momentum=0.001, affine=False),
nn.ReLU(inplace=True), nn.Dropout(),
nn.Linear(hid_channels, out_channels), nn.ReLU(inplace=True))
def init_weights(self, init_linear='normal'):
assert init_linear in ['normal', 'kaiming'], \
"Undefined init_linear: {}".format(init_linear)
for m in self.modules():
if isinstance(m, nn.Linear):
if init_linear == 'normal':
normal_init(m, std=0.01)
else:
kaiming_init(m, mode='fan_in', nonlinearity='relu')
elif isinstance(m,
(nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
assert len(x) == 1
if self.with_avg_pool:
x = self.avgpool(x[0])
return [self.mlp(x.view(x.size(0), -1))]
@NECKS.register_module
class NonLinearNeckV1(nn.Module):
    '''The non-linear neck in MoCo v2: fc-relu-fc
'''
def __init__(self,
in_channels,
hid_channels,
out_channels,
with_avg_pool=True):
super(NonLinearNeckV1, self).__init__()
self.with_avg_pool = with_avg_pool
if with_avg_pool:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.mlp = nn.Sequential(
nn.Linear(in_channels, hid_channels), nn.ReLU(inplace=True),
nn.Linear(hid_channels, out_channels))
def init_weights(self, init_linear='normal'):
assert init_linear in ['normal', 'kaiming'], \
"Undefined init_linear: {}".format(init_linear)
for m in self.modules():
if isinstance(m, nn.Linear):
if init_linear == 'normal':
normal_init(m, std=0.01)
else:
kaiming_init(m, mode='fan_in', nonlinearity='relu')
elif isinstance(m,
(nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
assert len(x) == 1
if self.with_avg_pool:
x = self.avgpool(x[0])
return [self.mlp(x.view(x.size(0), -1))]
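# Illustrative usage sketch (an assumption added for clarity, not part of the original
# module); the shapes below follow a ResNet-50 backbone with a 128-d projection head:
#
#     neck = NonLinearNeckV1(in_channels=2048, hid_channels=2048, out_channels=128)
#     neck.init_weights(init_linear='normal')
#     feats = [torch.randn(4, 2048, 7, 7)]   # backbone output wrapped in a 1-element list
#     proj = neck(feats)[0]                  # -> tensor of shape (4, 128)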
@NECKS.register_module
class NonLinearNeckSimCLR(nn.Module):
    '''SimCLR non-linear neck.
    Structure: fc(no_bias)-bn(has_bias)-[relu-fc(no_bias)-bn(no_bias)].
    The substructures in [] can be repeated. For the SimCLR default setting,
    the repeat time is 1.
    However, PyTorch does not support specifying (weight=True, bias=False).
    It only supports "affine", which covers both the weight and the bias. Hence, the
    second BatchNorm has a bias in this implementation. This is different from
    the official implementation of SimCLR.
    Since SyncBatchNorm in pytorch<1.4.0 does not support 2D input, the input is
    expanded to 4D with shape: (N, C, 1, 1). I am not sure whether this workaround
    is bug-free. See the pull request here:
    https://github.com/pytorch/pytorch/pull/29626
    Arguments:
        num_layers (int): number of fc layers; it is 2 in the SimCLR default setting.
    '''
def __init__(self,
in_channels,
hid_channels,
out_channels,
num_layers=2,
with_avg_pool=True):
super(NonLinearNeckSimCLR, self).__init__()
self.with_avg_pool = with_avg_pool
if with_avg_pool:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if StrictVersion(torch.__version__) < StrictVersion("1.4.0"):
self.expand_for_syncbn = True
else:
self.expand_for_syncbn = False
self.relu = nn.ReLU(inplace=True)
self.fc0 = nn.Linear(in_channels, hid_channels, bias=False)
_, self.bn0 = build_norm_layer(
dict(type='SyncBN'), hid_channels)
self.fc_names = []
self.bn_names = []
for i in range(1, num_layers):
this_channels = out_channels if i == num_layers - 1 \
else hid_channels
self.add_module(
"fc{}".format(i),
nn.Linear(hid_channels, this_channels, bias=False))
self.add_module(
"bn{}".format(i),
build_norm_layer(dict(type='SyncBN'), this_channels)[1])
self.fc_names.append("fc{}".format(i))
self.bn_names.append("bn{}".format(i))
def init_weights(self, init_linear='normal'):
assert init_linear in ['normal', 'kaiming'], \
"Undefined init_linear: {}".format(init_linear)
for m in self.modules():
if isinstance(m, nn.Linear):
if init_linear == 'normal':
normal_init(m, std=0.01)
else:
kaiming_init(m, mode='fan_in', nonlinearity='relu')
elif isinstance(m,
(nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _forward_syncbn(self, module, x):
assert x.dim() == 2
if self.expand_for_syncbn:
x = module(x.unsqueeze(-1).unsqueeze(-1)).squeeze(-1).squeeze(-1)
else:
x = module(x)
return x
def forward(self, x):
assert len(x) == 1
if self.with_avg_pool:
x = self.avgpool(x[0])
x = x.view(x.size(0), -1)
x = self.fc0(x)
x = self._forward_syncbn(self.bn0, x)
for fc_name, bn_name in zip(self.fc_names, self.bn_names):
fc = getattr(self, fc_name)
bn = getattr(self, bn_name)
x = self.relu(x)
x = fc(x)
x = self._forward_syncbn(bn, x)
return [x]
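# Illustrative construction (an assumption added for clarity, not part of the original
# module); it mirrors the SimCLR default of two fc layers:
#
#     neck = NonLinearNeckSimCLR(in_channels=2048, hid_channels=2048,
#                                out_channels=128, num_layers=2)
#     neck.init_weights(init_linear='kaiming')
#
# Because the projection head is built with SyncBN layers, the forward pass expects an
# initialized distributed process group (e.g. when training with DistributedDataParallel).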
@NECKS.register_module
class AvgPoolNeck(nn.Module):
def __init__(self):
super(AvgPoolNeck, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
def init_weights(self, **kwargs):
pass
def forward(self, x):
assert len(x) == 1
return [self.avg_pool(x[0])]
| 36.437768 | 94 | 0.560777 |
9628e35d2c3e438f9c2a9a4704b129e5e7a3b650 | 4,176 | py | Python | icevision/parsers/voc_parser.py | Addono/icevision | 5207de65f780735bdf8ed9d4d01ce72ad852aa9c | ["Apache-2.0"] | null | null | null | icevision/parsers/voc_parser.py | Addono/icevision | 5207de65f780735bdf8ed9d4d01ce72ad852aa9c | ["Apache-2.0"] | null | null | null | icevision/parsers/voc_parser.py | Addono/icevision | 5207de65f780735bdf8ed9d4d01ce72ad852aa9c | ["Apache-2.0"] | 1 | 2021-03-20T16:54:45.000Z | 2021-03-20T16:54:45.000Z | __all__ = ["voc", "VocXmlParser", "VocMaskParser"]
import xml.etree.ElementTree as ET
from icevision.imports import *
from icevision.utils import *
from icevision.core import *
from icevision.parsers.parser import *
from icevision.parsers.defaults import *
from icevision.parsers.mixins import *
def voc(
annotations_dir: Union[str, Path],
images_dir: Union[str, Path],
class_map: Optional[ClassMap] = None,
masks_dir: Optional[Union[str, Path]] = None,
idmap: Optional[IDMap] = None,
):
if not masks_dir:
return VocXmlParser(
annotations_dir=annotations_dir,
images_dir=images_dir,
class_map=class_map,
idmap=idmap,
)
else:
return VocMaskParser(
annotations_dir=annotations_dir,
images_dir=images_dir,
masks_dir=masks_dir,
class_map=class_map,
idmap=idmap,
)
class VocXmlParser(Parser, FilepathMixin, SizeMixin, LabelsMixin, BBoxesMixin):
def __init__(
self,
annotations_dir: Union[str, Path],
images_dir: Union[str, Path],
class_map: Optional[ClassMap] = None,
idmap: Optional[IDMap] = None,
):
super().__init__(class_map=class_map, idmap=idmap)
self.images_dir = Path(images_dir)
self.annotations_dir = Path(annotations_dir)
self.annotation_files = get_files(self.annotations_dir, extensions=[".xml"])
def __len__(self):
return len(self.annotation_files)
def __iter__(self):
yield from self.annotation_files
def prepare(self, o):
tree = ET.parse(str(o))
self._root = tree.getroot()
self._filename = self._root.find("filename").text
self._size = self._root.find("size")
def imageid(self, o) -> Hashable:
return str(Path(self._filename).stem)
def filepath(self, o) -> Union[str, Path]:
return self.images_dir / self._filename
def image_width_height(self, o) -> Tuple[int, int]:
return get_image_size(self.filepath(o))
def labels(self, o) -> List[Hashable]:
labels = []
for object in self._root.iter("object"):
label = object.find("name").text
labels.append(label)
return labels
def bboxes(self, o) -> List[BBox]:
def to_int(x):
return int(float(x))
bboxes = []
for object in self._root.iter("object"):
xml_bbox = object.find("bndbox")
xmin = to_int(xml_bbox.find("xmin").text)
ymin = to_int(xml_bbox.find("ymin").text)
xmax = to_int(xml_bbox.find("xmax").text)
ymax = to_int(xml_bbox.find("ymax").text)
bbox = BBox.from_xyxy(xmin, ymin, xmax, ymax)
bboxes.append(bbox)
return bboxes
class VocMaskParser(VocXmlParser, MasksMixin):
def __init__(
self,
annotations_dir: Union[str, Path],
images_dir: Union[str, Path],
masks_dir: Union[str, Path],
class_map: Optional[ClassMap] = None,
idmap: Optional[IDMap] = None,
):
super().__init__(
annotations_dir=annotations_dir,
images_dir=images_dir,
class_map=class_map,
idmap=idmap,
)
self.masks_dir = masks_dir
self.mask_files = get_image_files(masks_dir)
self._imageid2maskfile = {self.imageid_mask(o): o for o in self.mask_files}
# filter annotations
masks_ids = frozenset(self._imageid2maskfile.keys())
self._intersection = []
for item in super().__iter__():
super().prepare(item)
if super().imageid(item) in masks_ids:
self._intersection.append(item)
def __len__(self):
return len(self._intersection)
def __iter__(self):
yield from self._intersection
def imageid_mask(self, o) -> Hashable:
"""Should return the same as `imageid` from parent parser."""
return str(Path(o).stem)
def masks(self, o) -> List[Mask]:
mask_file = self._imageid2maskfile[self.imageid(o)]
return [VocMaskFile(mask_file)]
| 30.26087 | 84 | 0.612308 |
347534963123a3e9a56866b756814c55afd74e11 | 6,833 | py | Python | Traveling_Salesperson/genSalesperson.py | Wason1797/Fun-Python | 1432aec98423f13cc228c34c53bdb19ba4efe1da | [
"MIT"
] | 11 | 2018-11-21T19:34:48.000Z | 2019-01-13T04:30:44.000Z | Traveling_Salesperson/genSalesperson.py | Wason1797/Fun-Python | 1432aec98423f13cc228c34c53bdb19ba4efe1da | [
"MIT"
] | null | null | null | Traveling_Salesperson/genSalesperson.py | Wason1797/Fun-Python | 1432aec98423f13cc228c34c53bdb19ba4efe1da | [
"MIT"
] | null | null | null | import pygame
import sys
from pygame.locals import *
import random as rd
import math as mt
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
WHITE = (255, 255, 255)
pygame.init()
pygame.font.init()
window = pygame.display.set_mode((700, 500), 0, 32)
pygame.display.set_caption('Traveling Salesperson Genetic Approach')
_with, _height = window.get_size()
cities = []
# cities = [(385, 121), (109, 259), (381, 213),
# (188, 454), (308, 322), (274, 454), (176, 106), (563, 206)]
num_cities = 9
population = []
p_size = 600  # p_size must not be greater than factorial(num_cities)
max_generations = 40
fitness = []
min_distance = 0
best_path = []
def swap(arr, index_a, index_b):
arr[index_a], arr[index_b] = arr[index_b], arr[index_a]
def text_to_screen(text, x, y, size, _color=(200, 000, 000), font_type='Arial'):
text = str(text)
font = pygame.font.SysFont(font_type, size)
text = font.render(text, True, _color)
window.blit(text, (x, y))
def draw_circle(start, radius, _color):
pygame.draw.circle(window, _color, start, radius)
def draw_line(start, end, _stroke, _color):
pygame.draw.line(window, _color, start, end, _stroke)
def calculate_distance(point_a, point_b):
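    # Squared Euclidean distance between two (x, y) points -- the square root is
    # omitted, so the path "distances" below are sums of squared segment lengths.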
return pow((point_a[0] - point_b[0]), 2) + pow((point_a[1] - point_b[1]), 2)
def calculate_path_distance(points, _order):
total_distance = 0
for j in range(len(_order)-1):
total_distance += calculate_distance(points[_order[j]],
points[_order[j+1]])
return total_distance
def setup():
for counter in range(num_cities):
cities.append((rd.randint(5, _with-10), rd.randint(5, _height-10)))
first_order = list(range(num_cities))
shuffle(first_order, num_cities//2)
population.append(first_order)
for counter in range(p_size-1):
while first_order in population:
next_order = first_order.copy()
shuffle(next_order, num_cities//2)
first_order = next_order.copy()
population.append(first_order)
def calculate_population_fitness():
min_distance_local = mt.inf
best_path_local = []
for ind in population:
_distance = calculate_path_distance(cities, ind)
if _distance < min_distance_local:
min_distance_local = _distance
best_path_local = ind.copy()
fitness.append(1/(_distance+1))
return best_path_local, min_distance_local
def normalize_fitness():
fit_sum = 0
for fit in fitness:
fit_sum += fit
for index in range(len(fitness)):
fitness[index] = fitness[index]/fit_sum
def shuffle(arr, times):
for counter in range(times):
index_a = rd.randint(0, len(arr)-1)
index_b = rd.randint(0, len(arr)-1)
swap(arr, index_a, index_b)
def pick_fitness_based(pop_arr, fit_arr):
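    # Roulette-wheel (fitness-proportionate) selection: fit_arr is expected to be
    # normalized (see normalize_fitness), so subtracting entries from a random
    # r in [0, 1) walks the cumulative distribution and returns an individual
    # with probability equal to its fitness share.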
index = 0
r = rd.random()
while r > 0:
r = r - fit_arr[index]
index += 1
index -= 1
return pop_arr[index].copy()
def mutate(_parent, _rate):
for counter in range(num_cities):
if rd.random() < _rate:
index = rd.randint(0, len(_parent)-1) % (len(_parent)-1)
swap(_parent, index, index+1)
def cross_over_b(order_a, order_b):
new_order = []
index = 0
while index < len(order_a)-1:
dist_a = calculate_distance(cities[order_a[index]], cities[order_a[index+1]])
dist_b = calculate_distance(cities[order_b[index]], cities[order_b[index+1]])
if dist_a < dist_b:
new_order.append(order_a[index])
new_order.append(order_a[index+1])
else:
new_order.append(order_b[index])
new_order.append(order_b[index + 1])
index += 2
return new_order
def cross_over(order_a, order_b):
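    # Ordered crossover: keep a random contiguous slice of parent A, then fill the
    # rest with the remaining cities in the order they appear in parent B.
    # Illustrative example with 5 cities and slice [1:3]:
    #   order_a = [3, 1, 4, 0, 2] -> slice gives [1, 4]
    #   order_b = [0, 2, 3, 1, 4] -> remaining cities in B's order: [0, 2, 3]
    #   child   = [1, 4, 0, 2, 3]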
start = rd.randint(0, (len(order_a)-1))
if start == (len(order_a)-1):
start -= 1
end = rd.randint(start+1, (len(order_a)-1))
new_order = order_a[start:end]
for item in order_b:
if item not in new_order:
new_order.append(item)
return new_order
def next_generation():
new_population = []
for j in range(len(population)):
order_a = pick_fitness_based(population, fitness)
order_b = pick_fitness_based(population, fitness)
order = cross_over(order_a, order_b)
mutate(order, 0.25)
new_population.append(order)
return new_population
if __name__ == '__main__':
generation_count = 0
new_min_distance = 1
setup()
best_path, min_distance = calculate_population_fitness()
normalize_fitness()
stop_draw = None
range_search = range(len(population[0])-1)
range_best = range(len(best_path)-1)
print(cities)
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if stop_draw is None:
for path in population:
for city in cities:
draw_circle(city, 7, RED)
for i in range_search:
draw_line(cities[path[i]], cities[path[i+1]], 1, WHITE)
for i in range_best:
draw_line(cities[best_path[i]], cities[best_path[i+1]], 4, GREEN)
text_to_screen("Generation: " + str(generation_count) + " Distance: " + str(min_distance), 10, _height-21, 20, WHITE)
pygame.display.update()
pygame.time.wait(1)
window.fill(BLACK)
stop_draw = True
elif generation_count < max_generations:
population = next_generation()
fitness.clear()
new_best_path, new_min_distance = calculate_population_fitness()
if new_min_distance <= min_distance:
min_distance = new_min_distance
best_path = new_best_path
print(generation_count)
print(best_path)
print(min_distance)
normalize_fitness()
generation_count += 1
stop_draw = None
print(new_min_distance)
print(min_distance / new_min_distance)
# elif min_distance/new_min_distance < 0.9999:
# print(new_min_distance)
# print(min_distance / new_min_distance)
# population = next_generation()
# fitness.clear()
# new_best_path, new_min_distance = calculate_population_fitness()
# if new_min_distance < min_distance:
# min_distance = new_min_distance
# best_path = new_best_path
# print(generation_count)
# print(best_path)
# print(min_distance)
# normalize_fitness()
# generation_count += 1
# stop_draw = None
| 31.344037 | 133 | 0.610566 |
e2d2f1bcbfe013b282d95daa72fb77f9b1a86141 | 757 | py | Python | acme_test.py | stefanoruiz/acme | 027dbcbab797aac7272b63f07aeccbc6f6d936ed | [
"MIT"
] | null | null | null | acme_test.py | stefanoruiz/acme | 027dbcbab797aac7272b63f07aeccbc6f6d936ed | [
"MIT"
] | null | null | null | acme_test.py | stefanoruiz/acme | 027dbcbab797aac7272b63f07aeccbc6f6d936ed | [
"MIT"
] | null | null | null | import unittest
from acme import Product
from acme_report import generate_products, ADJECTIVES, NOUNS
class AcmeProductTests(unittest.TestCase):
"""Making sure Acme products are the tops!"""
def test_default_product_price(self):
"""Test default product price being 10."""
prod = Product('Test Product')
self.assertEqual(prod.price, 10)
def test_explosiveness(self):
"""Test explosiveness."""
prod = Product('Test Product')
self.assertTrue(prod.explode(), '...boom!')
class AcmeReportTests(unittest.TestCase):
def test_default_num_products(self):
prod = generate_products()
print(len(prod))
self.assertTrue(prod, 25)
if __name__ == '__main__':
unittest.main()
| 28.037037 | 60 | 0.676354 |
a9bdb0b80954ff5e2bf48ea98c59a51d8e5665e8 | 11,397 | py | Python | test/test_lsh.py | andriyor/datasketch | ec36c20caeda6a1fbcbefcd92161bb9d2a8b8d78 | [
"MIT"
] | null | null | null | test/test_lsh.py | andriyor/datasketch | ec36c20caeda6a1fbcbefcd92161bb9d2a8b8d78 | [
"MIT"
] | null | null | null | test/test_lsh.py | andriyor/datasketch | ec36c20caeda6a1fbcbefcd92161bb9d2a8b8d78 | [
"MIT"
] | null | null | null | import unittest
import pickle
import numpy as np
import mockredis
from mock import patch
from datasketch.lsh import MinHashLSH
from datasketch.minhash import MinHash
from datasketch.weighted_minhash import WeightedMinHashGenerator
def fake_redis(**kwargs):
redis = mockredis.mock_redis_client(**kwargs)
redis.connection_pool = None
redis.response_callbacks = None
return redis
class TestMinHashLSH(unittest.TestCase):
def test_init(self):
lsh = MinHashLSH(threshold=0.8)
self.assertTrue(lsh.is_empty())
b1, r1 = lsh.b, lsh.r
lsh = MinHashLSH(threshold=0.8, weights=(0.2,0.8))
b2, r2 = lsh.b, lsh.r
self.assertTrue(b1 < b2)
self.assertTrue(r1 > r2)
def test__H(self):
'''
        Check that _H produces a consistent byte length given
        the same concatenated hash value size
'''
for l in range(2, 128+1, 16):
lsh = MinHashLSH(num_perm=128)
m = MinHash()
m.update("abcdefg".encode("utf8"))
m.update("1234567".encode("utf8"))
lsh.insert("m", m)
sizes = [len(H) for ht in lsh.hashtables for H in ht]
self.assertTrue(all(sizes[0] == s for s in sizes))
def test_insert(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue("a" in items)
self.assertTrue("b" in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys["a"]):
self.assertTrue("a" in lsh.hashtables[i][H])
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.insert, "c", m3)
def test_query(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.query, m3)
def test_query_buffer(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.add_to_query_buffer(m1)
result = lsh.collect_query_buffer()
self.assertTrue("a" in result)
lsh.add_to_query_buffer(m2)
result = lsh.collect_query_buffer()
self.assertTrue("b" in result)
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.add_to_query_buffer, m3)
def test_remove(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.remove("a")
self.assertTrue("a" not in lsh.keys)
for table in lsh.hashtables:
for H in table:
self.assertGreater(len(table[H]), 0)
self.assertTrue("a" not in table[H])
self.assertRaises(ValueError, lsh.remove, "c")
def test_pickle(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh2 = pickle.loads(pickle.dumps(lsh))
result = lsh2.query(m1)
self.assertTrue("a" in result)
result = lsh2.query(m2)
self.assertTrue("b" in result)
def test_insert_redis(self):
with patch('redis.Redis', fake_redis) as mock_redis:
lsh = MinHashLSH(threshold=0.5, num_perm=16, storage_config={
'type': 'redis', 'redis': {'host': 'localhost', 'port': 6379}
})
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue(pickle.dumps("a") in items)
self.assertTrue(pickle.dumps("b") in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys[pickle.dumps("a")]):
self.assertTrue(pickle.dumps("a") in lsh.hashtables[i][H])
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.insert, "c", m3)
def test_query_redis(self):
with patch('redis.Redis', fake_redis) as mock_redis:
lsh = MinHashLSH(threshold=0.5, num_perm=16, storage_config={
'type': 'redis', 'redis': {'host': 'localhost', 'port': 6379}
})
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.query, m3)
def test_query_buffer_redis(self):
with patch('redis.Redis', fake_redis) as mock_redis:
lsh = MinHashLSH(threshold=0.5, num_perm=16, storage_config={
'type': 'redis', 'redis': {'host': 'localhost', 'port': 6379}
})
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.query(m1)
lsh.add_to_query_buffer(m1)
result = lsh.collect_query_buffer()
self.assertTrue("a" in result)
lsh.add_to_query_buffer(m2)
result = lsh.collect_query_buffer()
self.assertTrue("b" in result)
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.add_to_query_buffer, m3)
def test_insertion_session(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
data = [("a", m1), ("b", m2)]
with lsh.insertion_session() as session:
for key, minhash in data:
session.insert(key, minhash)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue("a" in items)
self.assertTrue("b" in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys["a"]):
self.assertTrue("a" in lsh.hashtables[i][H])
def test_get_counts(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
counts = lsh.get_counts()
self.assertEqual(len(counts), lsh.b)
for table in counts:
self.assertEqual(sum(table.values()), 2)
class TestWeightedMinHashLSH(unittest.TestCase):
def test_init(self):
lsh = MinHashLSH(threshold=0.8)
self.assertTrue(lsh.is_empty())
b1, r1 = lsh.b, lsh.r
lsh = MinHashLSH(threshold=0.8, weights=(0.2,0.8))
b2, r2 = lsh.b, lsh.r
self.assertTrue(b1 < b2)
self.assertTrue(r1 > r2)
def test__H(self):
'''
        Check that _H produces a consistent byte length given
        the same concatenated hash value size
'''
mg = WeightedMinHashGenerator(100, sample_size=128)
for l in range(2, mg.sample_size+1, 16):
m = mg.minhash(np.random.randint(1, 99999999, 100))
lsh = MinHashLSH(num_perm=128)
lsh.insert("m", m)
sizes = [len(H) for ht in lsh.hashtables for H in ht]
self.assertTrue(all(sizes[0] == s for s in sizes))
def test_insert(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue("a" in items)
self.assertTrue("b" in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys["a"]):
self.assertTrue("a" in lsh.hashtables[i][H])
mg = WeightedMinHashGenerator(10, 5)
m3 = mg.minhash(np.random.uniform(1, 10, 10))
self.assertRaises(ValueError, lsh.insert, "c", m3)
def test_query(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
mg = WeightedMinHashGenerator(10, 5)
m3 = mg.minhash(np.random.uniform(1, 10, 10))
self.assertRaises(ValueError, lsh.query, m3)
def test_remove(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.remove("a")
self.assertTrue("a" not in lsh.keys)
for table in lsh.hashtables:
for H in table:
self.assertGreater(len(table[H]), 0)
self.assertTrue("a" not in table[H])
self.assertRaises(ValueError, lsh.remove, "c")
def test_pickle(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh2 = pickle.loads(pickle.dumps(lsh))
result = lsh2.query(m1)
self.assertTrue("a" in result)
result = lsh2.query(m2)
self.assertTrue("b" in result)
if __name__ == "__main__":
unittest.main()
| 34.432024 | 77 | 0.548741 |
98f5ce526a5e55dda9761a579a504359b4349cc7 | 4,141 | py | Python | tests/modeltests/m2m_recursive/models.py | rawwell/django | 6b3264671ead4604f26cbd2b71e8d6a02945bf0c | [
"BSD-3-Clause"
] | 1 | 2016-05-08T12:24:22.000Z | 2016-05-08T12:24:22.000Z | tests/modeltests/m2m_recursive/models.py | rawwell/django | 6b3264671ead4604f26cbd2b71e8d6a02945bf0c | [
"BSD-3-Clause"
] | null | null | null | tests/modeltests/m2m_recursive/models.py | rawwell/django | 6b3264671ead4604f26cbd2b71e8d6a02945bf0c | [
"BSD-3-Clause"
] | 1 | 2015-11-19T14:45:16.000Z | 2015-11-19T14:45:16.000Z | """
28. Many-to-many relationships between the same two tables
In this example, a Person can have many friends, who are also people. Friendship is a
symmetrical relationship - if I am your friend, you are my friend.
A person can also have many idols - but while I may idolize you, you may not think
the same of me. 'Idols' is an example of a non-symmetrical m2m field. Only recursive
m2m fields may be non-symmetrical, and they are symmetrical by default.
This test validates that the m2m table will get a mangled name if there would
otherwise be a clash, and tests that symmetry is preserved where appropriate.
"""
from django.db import models
class Person(models.Model):
name = models.CharField(max_length=20)
friends = models.ManyToManyField('self')
idols = models.ManyToManyField('self', symmetrical=False, related_name='stalkers')
def __unicode__(self):
return self.name
__test__ = {'API_TESTS':"""
>>> a = Person(name='Anne')
>>> a.save()
>>> b = Person(name='Bill')
>>> b.save()
>>> c = Person(name='Chuck')
>>> c.save()
>>> d = Person(name='David')
>>> d.save()
# Add some friends in the direction of field definition
# Anne is friends with Bill and Chuck
>>> a.friends.add(b,c)
# David is friends with Anne and Chuck - add in reverse direction
>>> d.friends.add(a,c)
# Who is friends with Anne?
>>> a.friends.all()
[<Person: Bill>, <Person: Chuck>, <Person: David>]
# Who is friends with Bill?
>>> b.friends.all()
[<Person: Anne>]
# Who is friends with Chuck?
>>> c.friends.all()
[<Person: Anne>, <Person: David>]
# Who is friends with David?
>>> d.friends.all()
[<Person: Anne>, <Person: Chuck>]
# Bill is already friends with Anne - add Anne again, but in the reverse direction
>>> b.friends.add(a)
# Who is friends with Anne?
>>> a.friends.all()
[<Person: Bill>, <Person: Chuck>, <Person: David>]
# Who is friends with Bill?
>>> b.friends.all()
[<Person: Anne>]
# Remove Anne from Bill's friends
>>> b.friends.remove(a)
# Who is friends with Anne?
>>> a.friends.all()
[<Person: Chuck>, <Person: David>]
# Who is friends with Bill?
>>> b.friends.all()
[]
# Clear Anne's group of friends
>>> a.friends.clear()
# Who is friends with Anne?
>>> a.friends.all()
[]
# Reverse relationships should also be gone
# Who is friends with Chuck?
>>> c.friends.all()
[<Person: David>]
# Who is friends with David?
>>> d.friends.all()
[<Person: Chuck>]
# Add some idols in the direction of field definition
# Anne idolizes Bill and Chuck
>>> a.idols.add(b,c)
# Bill idolizes Anne right back
>>> b.idols.add(a)
# David is idolized by Anne and Chuck - add in reverse direction
>>> d.stalkers.add(a,c)
# Who are Anne's idols?
>>> a.idols.all()
[<Person: Bill>, <Person: Chuck>, <Person: David>]
# Who is stalking Anne?
>>> a.stalkers.all()
[<Person: Bill>]
# Who are Bill's idols?
>>> b.idols.all()
[<Person: Anne>]
# Who is stalking Bill?
>>> b.stalkers.all()
[<Person: Anne>]
# Who are Chuck's idols?
>>> c.idols.all()
[<Person: David>]
# Who is stalking Chuck?
>>> c.stalkers.all()
[<Person: Anne>]
# Who are David's idols?
>>> d.idols.all()
[]
# Who is stalking David
>>> d.stalkers.all()
[<Person: Anne>, <Person: Chuck>]
# Bill is already being stalked by Anne - add Anne again, but in the reverse direction
>>> b.stalkers.add(a)
# Who are Anne's idols?
>>> a.idols.all()
[<Person: Bill>, <Person: Chuck>, <Person: David>]
# Who is stalking Anne?
>>> a.stalkers.all()
[<Person: Bill>]
# Who are Bill's idols
>>> b.idols.all()
[<Person: Anne>]
# Who is stalking Bill?
>>> b.stalkers.all()
[<Person: Anne>]
# Remove Anne from Bill's list of stalkers
>>> b.stalkers.remove(a)
# Who are Anne's idols?
>>> a.idols.all()
[<Person: Chuck>, <Person: David>]
# Who is stalking Anne?
>>> a.stalkers.all()
[<Person: Bill>]
# Who are Bill's idols?
>>> b.idols.all()
[<Person: Anne>]
# Who is stalking Bill?
>>> b.stalkers.all()
[]
# Clear Anne's group of idols
>>> a.idols.clear()
# Who are Anne's idols
>>> a.idols.all()
[]
# Reverse relationships should also be gone
# Who is stalking Chuck?
>>> c.stalkers.all()
[]
# Who is friends with David?
>>> d.stalkers.all()
[<Person: Chuck>]
"""}
| 21.455959 | 86 | 0.665298 |
f41cc159541442b91f30c2fbba895ae7afe0576e | 14,254 | py | Python | manimlib/mobject/svg/code_mobject.py | adornetejr/manim | e0715ceeff4778d11ef4ac31f8f8f2b56a2187ad | [
"MIT"
] | 8 | 2021-09-25T06:00:53.000Z | 2022-03-12T03:31:04.000Z | manimlib/mobject/svg/code_mobject.py | adornetejr/manim | e0715ceeff4778d11ef4ac31f8f8f2b56a2187ad | [
"MIT"
] | 1 | 2020-07-27T01:15:41.000Z | 2020-07-27T01:15:41.000Z | manimlib/mobject/svg/code_mobject.py | adornetejr/manim | e0715ceeff4778d11ef4ac31f8f8f2b56a2187ad | [
"MIT"
] | 1 | 2020-11-19T06:24:49.000Z | 2020-11-19T06:24:49.000Z | import html
from manimlib.constants import *
from manimlib.container.container import Container
from manimlib.mobject.geometry import Rectangle, Dot, RoundedRectangle
from manimlib.mobject.shape_matchers import SurroundingRectangle
from manimlib.mobject.svg.text_mobject import Paragraph
from manimlib.mobject.types.vectorized_mobject import VGroup
import re
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import HtmlFormatter
'''
1) Code is a VGroup() with three things
    1.1) Code[0] is Code.background_mobject
         which can be a
         1.1.1) Rectangle() if background == "rectangle"
         1.1.2) VGroup() of a RoundedRectangle() and three Dot() buttons if background == "window"
    1.2) Code[1] is Code.line_numbers, which is a Paragraph() object; this means you can use
         Code.line_numbers[0] or Code[1][0] to access the first line number
    1.3) Code[2] is Code.code
         1.3.1) which is a Paragraph() with color highlighting; this means you can use
                Code.code[1] or Code[2][1]
                    line number 1
                Code.code[1][0] or Code[2][1][0]
                    first character of line number 1
                Code.code[1][0:5] or Code[2][1][0:5]
                    first five characters of line number 1
'''
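# Minimal usage sketch inside a Scene's construct() (illustrative; assumes a file
# named "example.py" exists in assets/codes/ or the working directory):
#
#   code = Code("example.py", language="python", style="monokai")
#   self.add(code)                  # window, line numbers and highlighted code
#   self.play(Write(code.code[0]))  # animate the first highlighted line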
class Code(VGroup):
CONFIG = {
"tab_width": 3,
"line_spacing": 0.1,
"scale_factor": 0.5,
"run_time": 1,
"font": 'Monospac821 BT',
'stroke_width': 0,
'margin': 0.3,
'indentation_char': " ",
"background": "rectangle", # or window
"corner_radius": 0.2,
'insert_line_no': True,
'line_no_from': 1,
"line_no_buff": 0.4,
'style': 'vim',
'language': 'cpp',
'generate_html_file': False
}
def __init__(self, file_name=None, **kwargs):
Container.__init__(self, **kwargs)
self.file_name = file_name or self.file_name
self.ensure_valid_file()
self.style = self.style.lower()
self.gen_html_string()
strati = self.html_string.find("background:")
self.background_color = self.html_string[strati + 12:strati + 19]
self.gen_code_json()
self.code = self.gen_colored_lines()
if self.insert_line_no:
self.line_numbers = self.gen_line_numbers()
self.line_numbers.next_to(self.code, direction=LEFT, buff=self.line_no_buff)
if self.background == "rectangle":
if self.insert_line_no:
forground = VGroup(self.code, self.line_numbers)
else:
forground = self.code
self.background_mobject = SurroundingRectangle(forground, buff=self.margin,
color=self.background_color,
fill_color=self.background_color,
stroke_width=0,
fill_opacity=1, )
self.background_mobject.round_corners(self.corner_radius)
else:
if self.insert_line_no:
forground = VGroup(self.code, self.line_numbers)
else:
forground = self.code
height = forground.get_height() + 0.1 * 3 + 2 * self.margin
width = forground.get_width() + 0.1 * 3 + 2 * self.margin
rrect = RoundedRectangle(corner_radius=self.corner_radius, height=height, width=width,
stroke_width=0,
color=self.background_color, fill_opacity=1)
red_button = Dot(radius=0.1, stroke_width=0, color='#ff5f56')
red_button.shift(LEFT * 0.1 * 3)
yellow_button = Dot(radius=0.1, stroke_width=0, color='#ffbd2e')
green_button = Dot(radius=0.1, stroke_width=0, color='#27c93f')
green_button.shift(RIGHT * 0.1 * 3)
buttons = VGroup(red_button, yellow_button, green_button)
buttons.shift(
UP * (height / 2 - 0.1 * 2 - 0.05) + LEFT * (width / 2 - 0.1 * 5 - self.corner_radius / 2 - 0.05))
self.background_mobject = VGroup(rrect, buttons)
x = (height - forground.get_height()) / 2 - 0.1 * 3
self.background_mobject.shift(forground.get_center())
self.background_mobject.shift(UP * x)
if self.insert_line_no:
VGroup.__init__(self, self.background_mobject, self.line_numbers, *self.code, **kwargs)
else:
VGroup.__init__(self, self.background_mobject, Dot(fill_opacity=0, stroke_opacity=0), *self.code, **kwargs)
self.move_to(np.array([0, 0, 0]))
def apply_points_function_about_point(self, func, about_point=None, about_edge=None):
if about_point is None:
if about_edge is None:
about_edge = self.get_corner(UP + LEFT)
about_point = self.get_critical_point(about_edge)
for mob in self.family_members_with_points():
mob.points -= about_point
mob.points = func(mob.points)
mob.points += about_point
return self
def ensure_valid_file(self):
if self.file_name is None:
raise Exception("Must specify file for Code")
possible_paths = [
os.path.join(os.path.join("assets", "codes"), self.file_name),
self.file_name,
]
for path in possible_paths:
if os.path.exists(path):
self.file_path = path
return
raise IOError("No file matching %s in codes directory" %
self.file_name)
def gen_line_numbers(self):
line_numbers_array = []
for line_no in range(0, self.code_json.__len__()):
number = str(self.line_no_from + line_no)
line_numbers_array.append(number)
line_numbers = Paragraph(*[i for i in line_numbers_array], line_spacing=self.line_spacing,
alignment="right", font=self.font, stroke_width=self.stroke_width).scale(self.scale_factor)
return line_numbers
def gen_colored_lines(self):
lines_text = []
for line_no in range(0, self.code_json.__len__()):
line_str = ""
for word_index in range(self.code_json[line_no].__len__()):
line_str = line_str + self.code_json[line_no][word_index][0]
lines_text.append(self.tab_spaces[line_no] * "\t" + line_str)
code = Paragraph(*[i for i in lines_text], line_spacing=self.line_spacing, tab_width=self.tab_width,
alignment="left", font=self.font, stroke_width=self.stroke_width).scale(self.scale_factor)
for line_no in range(code.__len__()):
line = code[line_no]
line_char_index = self.tab_spaces[line_no]
for word_index in range(self.code_json[line_no].__len__()):
line[line_char_index:line_char_index + self.code_json[line_no][word_index][0].__len__()].set_color(
self.code_json[line_no][word_index][1])
line_char_index += self.code_json[line_no][word_index][0].__len__()
return code
def gen_html_string(self):
file = open(self.file_path, "r")
code_str = file.read()
file.close()
self.html_string = hilite_me(code_str, self.language, {}, self.style, self.insert_line_no,
"border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;")
if self.generate_html_file:
os.makedirs(os.path.join("assets", "codes", "generated_html_files"), exist_ok=True)
file = open(os.path.join("assets", "codes", "generated_html_files", self.file_name + ".html"), "w")
file.write(self.html_string)
file.close()
def gen_code_json(self):
if self.background_color == "#111111" or \
self.background_color == "#272822" or \
self.background_color == "#202020" or \
self.background_color == "#000000":
self.default_color = "#ffffff"
else:
self.default_color = "#000000"
for i in range(3, -1, -1):
self.html_string = self.html_string.replace("</" + " " * i, "</")
for i in range(10, -1, -1):
self.html_string = self.html_string.replace("</span>" + " " * i, " " * i + "</span>")
self.html_string = self.html_string.replace("background-color:", "background:")
if self.insert_line_no:
start_point = self.html_string.find("</td><td><pre")
start_point = start_point + 9
else:
start_point = self.html_string.find("<pre")
self.html_string = self.html_string[start_point:]
# print(self.html_string)
lines = self.html_string.split("\n")
lines = lines[0:lines.__len__() - 2]
start_point = lines[0].find(">")
lines[0] = lines[0][start_point + 1:]
# print(lines)
self.code_json = []
self.tab_spaces = []
code_json_line_index = -1
for line_index in range(0, lines.__len__()):
if lines[line_index].__len__() == 0:
continue
# print(lines[line_index])
self.code_json.append([])
code_json_line_index = code_json_line_index + 1
if lines[line_index].startswith(self.indentation_char):
start_point = lines[line_index].find("<")
starting_string = lines[line_index][:start_point]
indentation_char_count = lines[line_index][:start_point].count(self.indentation_char)
if starting_string.__len__() != indentation_char_count * self.indentation_char.__len__():
lines[line_index] = "\t" * indentation_char_count + starting_string[starting_string.rfind(
self.indentation_char) + self.indentation_char.__len__():] + \
lines[line_index][start_point:]
else:
lines[line_index] = "\t" * indentation_char_count + lines[line_index][start_point:]
indentation_char_count = 0
while lines[line_index][indentation_char_count] == '\t':
indentation_char_count = indentation_char_count + 1
self.tab_spaces.append(indentation_char_count)
# print(lines[line_index])
lines[line_index] = self.correct_non_span(lines[line_index])
# print(lines[line_index])
words = lines[line_index].split("<span")
for word_index in range(1, words.__len__()):
color_index = words[word_index].find("color:")
if color_index == -1:
color = self.default_color
else:
starti = words[word_index][color_index:].find("#")
color = words[word_index][color_index + starti:color_index + starti + 7]
start_point = words[word_index].find(">")
end_point = words[word_index].find("</span>")
text = words[word_index][start_point + 1:end_point]
text = html.unescape(text)
if text != "":
# print(text, "'" + color + "'")
self.code_json[code_json_line_index].append([text, color])
# print(self.code_json)
def correct_non_span(self, line_str):
words = line_str.split("</span>")
line_str = ""
for i in range(0, words.__len__()):
if i != words.__len__() - 1:
j = words[i].find("<span")
else:
j = words[i].__len__()
temp = ""
starti = -1
for k in range(0, j):
if words[i][k] == "\t" and starti == -1:
continue
else:
if starti == -1: starti = k
temp = temp + words[i][k]
if temp != "":
if i != words.__len__() - 1:
temp = '<span style="color:' + self.default_color + '">' + words[i][starti:j] + "</span>"
else:
temp = '<span style="color:' + self.default_color + '">' + words[i][starti:j]
temp = temp + words[i][j:]
words[i] = temp
if words[i] != "":
line_str = line_str + words[i] + "</span>"
return line_str
def hilite_me(code, lexer, options, style, linenos, divstyles):
lexer = lexer or 'python'
style = style or 'colorful'
defstyles = 'overflow:auto;width:auto;'
formatter = HtmlFormatter(style=style,
linenos=False,
noclasses=True,
cssclass='',
cssstyles=defstyles + divstyles,
prestyles='margin: 0')
html = highlight(code, get_lexer_by_name(lexer, **options), formatter)
if linenos:
html = insert_line_numbers(html)
html = "<!-- HTML generated using hilite.me -->" + html
return html
def get_default_style():
return 'border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;'
def insert_line_numbers(html):
match = re.search('(<pre[^>]*>)(.*)(</pre>)', html, re.DOTALL)
if not match: return html
pre_open = match.group(1)
pre = match.group(2)
pre_close = match.group(3)
html = html.replace(pre_close, '</pre></td></tr></table>')
numbers = range(1, pre.count('\n') + 1)
format = '%' + str(len(str(numbers[-1]))) + 'i'
lines = '\n'.join(format % i for i in numbers)
html = html.replace(pre_open, '<table><tr><td>' + pre_open + lines + '</pre></td><td>' + pre_open)
return html
| 45.832797 | 120 | 0.553459 |
f7d2deebe80a7c830aaf677e6fac86bcee59cbd0 | 210 | py | Python | 0171_ExcelSheetColumnNumber.py | yingzhuo1994/LeetCode | 636eef90867d21e3439d258ec99fbb8e5ad5a742 | [
"MIT"
] | null | null | null | 0171_ExcelSheetColumnNumber.py | yingzhuo1994/LeetCode | 636eef90867d21e3439d258ec99fbb8e5ad5a742 | [
"MIT"
] | null | null | null | 0171_ExcelSheetColumnNumber.py | yingzhuo1994/LeetCode | 636eef90867d21e3439d258ec99fbb8e5ad5a742 | [
"MIT"
] | null | null | null | class Solution:
def titleToNumber(self, columnTitle: str) -> int:
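        # Interpret the title as a base-26 number with digits A=1 ... Z=26,
        # e.g. "AB" -> 1 * 26 + 2 = 28 and "ZY" -> 26 * 26 + 25 = 701.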
start = ord('A') - 1
num = 0
for ch in columnTitle:
num = 26 * num + ord(ch) - start
return num
| 26.25 | 53 | 0.519048 |
053139008841aaaa7a775f24f87d778d9a2ef329 | 77 | py | Python | utils/drop_database.py | nixorn/iamripbackend | 659ee43bc0c42a459665cab74ab66385e4e2937f | [
"MIT"
] | null | null | null | utils/drop_database.py | nixorn/iamripbackend | 659ee43bc0c42a459665cab74ab66385e4e2937f | [
"MIT"
] | null | null | null | utils/drop_database.py | nixorn/iamripbackend | 659ee43bc0c42a459665cab74ab66385e4e2937f | [
"MIT"
] | null | null | null | from models import *
from engine import *
Base.metadata.drop_all(engine)
| 9.625 | 30 | 0.753247 |
bc8a606361aaae624e62cad2d789d729e6df67fe | 306 | py | Python | src/presence/presences/menu_presences/away.py | SumanthAkula/valorant-rpc | 88f62496cc297aa681d9467ce45e6669ae1f438f | [
"MIT"
] | null | null | null | src/presence/presences/menu_presences/away.py | SumanthAkula/valorant-rpc | 88f62496cc297aa681d9467ce45e6669ae1f438f | [
"MIT"
] | null | null | null | src/presence/presences/menu_presences/away.py | SumanthAkula/valorant-rpc | 88f62496cc297aa681d9467ce45e6669ae1f438f | [
"MIT"
] | null | null | null | def presence(rpc,client=None,data=None,content_data=None,config=None):
rpc.update(
state="Away",
details=f"Menu - {content_data['queue_aliases'][data['queueId']] if data['queueId'] != '' else 'Custom Setup'}",
large_image="game_icon_yellow",
large_text="VALORANT",
) | 43.714286 | 120 | 0.643791 |
60ecea16b47e08b28dc13a7e6b9d73a380e6beac | 1,399 | py | Python | k8v/printer.py | jasonhanks/k8v | e41d6b6d670567abc9dc37b423b4d48ef82a4d0e | [
"MIT"
] | 1 | 2022-01-17T03:51:12.000Z | 2022-01-17T03:51:12.000Z | k8v/printer.py | jasonhanks/k8v | e41d6b6d670567abc9dc37b423b4d48ef82a4d0e | [
"MIT"
] | null | null | null | k8v/printer.py | jasonhanks/k8v | e41d6b6d670567abc9dc37b423b4d48ef82a4d0e | [
"MIT"
] | null | null | null | import k8v
class Printer:
"""The Printer is responsible for using the Formatters to generate the output properly."""
def __init__(self, config: k8v.config.Config):
self.config = config
def print(self, resource, num: int = 1, max: int = 1, delim: str = ""):
"""Print the specified resources according to the specified Formatter."""
self.config.formatter.begin_resource()
self.config.formatter.print(resource, delim)
self.config.formatter.end_resource(num == max)
        # Print related objects recursively if needed
if self.config.related and len(resource._related) > 0:
for n, r in enumerate(resource._related):
self.print(r, n, len(resource._related), delim + self.config.delimeter)
def print_all(self, resources: list) -> int:
"""Properly format a list of resources found by the tool."""
# Print any beginning formatting needed (ex: JSON needs [ to begin the List)
self.config.formatter.begin()
# Format each resource and print() it to the output file
for num, resource in enumerate(resources):
self.print(resource, num, len(resources) - 1, "")
# Print any closing formatting needed (ex: JSON needs ] to close the List)
self.config.formatter.end()
# return how many resources were printed
return len(resources)
| 38.861111 | 94 | 0.652609 |
5a6de8942f83c5ca4bfd88ae6b1110297dea4078 | 1,300 | py | Python | jorldy/config/icm_ppo/atari.py | kakaoenterprise/JORLDY | 0d41e148cb9d680aa4df2727e0a813eb792d9a1b | [
"Apache-2.0"
] | 300 | 2021-11-03T07:06:34.000Z | 2022-03-24T02:23:56.000Z | jorldy/config/icm_ppo/atari.py | kakaoenterprise/JORLDY | 0d41e148cb9d680aa4df2727e0a813eb792d9a1b | [
"Apache-2.0"
] | 37 | 2021-11-04T04:31:07.000Z | 2022-03-30T01:40:49.000Z | jorldy/config/icm_ppo/atari.py | kakaoenterprise/JORLDY | 0d41e148cb9d680aa4df2727e0a813eb792d9a1b | [
"Apache-2.0"
] | 45 | 2021-11-03T08:05:56.000Z | 2022-03-24T08:35:05.000Z | ### ICM PPO Atari Config ###
env = {
# "name": it should be defined in the command. ex) python main.py --config config.AGENT.atari --env.name breakout
"render": False,
"gray_img": True,
"img_width": 84,
"img_height": 84,
"stack_frame": 4,
"no_op": True,
"skip_frame": 4,
"reward_clip": True,
"episodic_life": True,
}
agent = {
"name": "icm_ppo",
"network": "discrete_policy_value",
"head": "cnn",
"gamma": 0.99,
"batch_size": 32,
"n_step": 128,
"n_epoch": 3,
"_lambda": 0.95,
"epsilon_clip": 0.1,
"vf_coef": 1.0,
"ent_coef": 0.01,
"clip_grad_norm": 1.0,
"lr_decay": True,
# Parameters for Curiosity-driven Exploration
"icm_network": "icm_cnn", # icm_mlp, icm_cnn, icm_multi
"beta": 0.2,
"lamb": 1.0,
"eta": 0.1,
"extrinsic_coeff": 1.0,
"intrinsic_coeff": 1.0,
}
optim = {
"name": "adam",
"lr": 2.5e-4,
}
train = {
"training": True,
"load_path": None,
"run_step": 30000000,
"print_period": 10000,
"save_period": 100000,
"eval_iteration": 5,
"eval_time_limit": None,
"record": True,
"record_period": 300000,
# distributed setting
"distributed_batch_size": 1024,
"update_period": agent["n_step"],
"num_workers": 32,
}
| 22.033898 | 117 | 0.575385 |
02551de7238239f5c6cc54ee3f88e862ea7261fa | 8,277 | py | Python | IPython/frontend/qt/console/pygments_highlighter.py | tinyclues/ipython | 71e32606b0242772b81c9be0d40751ba47d95f2c | [
"BSD-3-Clause-Clear"
] | 2 | 2015-04-21T12:12:43.000Z | 2015-04-21T12:12:54.000Z | IPython/frontend/qt/console/pygments_highlighter.py | tinyclues/ipython | 71e32606b0242772b81c9be0d40751ba47d95f2c | [
"BSD-3-Clause-Clear"
] | 1 | 2015-07-16T22:26:53.000Z | 2015-07-16T22:26:53.000Z | IPython/frontend/qt/console/pygments_highlighter.py | ivanov/ipython | 62cc379d3b454923cb48e94663f385f54ec806cc | [
"BSD-3-Clause-Clear"
] | null | null | null | # System library imports.
from IPython.external.qt import QtGui
from pygments.formatters.html import HtmlFormatter
from pygments.lexer import RegexLexer, _TokenType, Text, Error
from pygments.lexers import PythonLexer
from pygments.styles import get_style_by_name
def get_tokens_unprocessed(self, text, stack=('root',)):
""" Split ``text`` into (tokentype, text) pairs.
Monkeypatched to store the final stack on the object itself.
"""
pos = 0
tokendefs = self._tokens
if hasattr(self, '_saved_state_stack'):
statestack = list(self._saved_state_stack)
else:
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
pos += 1
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, u'\n'
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
self._saved_state_stack = list(statestack)
# Monkeypatch!
RegexLexer.get_tokens_unprocessed = get_tokens_unprocessed
class PygmentsBlockUserData(QtGui.QTextBlockUserData):
""" Storage for the user data associated with each line.
"""
syntax_stack = ('root',)
def __init__(self, **kwds):
for key, value in kwds.iteritems():
setattr(self, key, value)
QtGui.QTextBlockUserData.__init__(self)
def __repr__(self):
attrs = ['syntax_stack']
kwds = ', '.join([ '%s=%r' % (attr, getattr(self, attr))
for attr in attrs ])
return 'PygmentsBlockUserData(%s)' % kwds
class PygmentsHighlighter(QtGui.QSyntaxHighlighter):
""" Syntax highlighter that uses Pygments for parsing. """
#---------------------------------------------------------------------------
# 'QSyntaxHighlighter' interface
#---------------------------------------------------------------------------
def __init__(self, parent, lexer=None):
super(PygmentsHighlighter, self).__init__(parent)
self._document = QtGui.QTextDocument()
self._formatter = HtmlFormatter(nowrap=True)
self._lexer = lexer if lexer else PythonLexer()
self.set_style('default')
def highlightBlock(self, string):
""" Highlight a block of text.
"""
prev_data = self.currentBlock().previous().userData()
if prev_data is not None:
self._lexer._saved_state_stack = prev_data.syntax_stack
elif hasattr(self._lexer, '_saved_state_stack'):
del self._lexer._saved_state_stack
# Lex the text using Pygments
index = 0
for token, text in self._lexer.get_tokens(string):
length = len(text)
self.setFormat(index, length, self._get_format(token))
index += length
if hasattr(self._lexer, '_saved_state_stack'):
data = PygmentsBlockUserData(
syntax_stack=self._lexer._saved_state_stack)
self.currentBlock().setUserData(data)
# Clean up for the next go-round.
del self._lexer._saved_state_stack
#---------------------------------------------------------------------------
# 'PygmentsHighlighter' interface
#---------------------------------------------------------------------------
def set_style(self, style):
""" Sets the style to the specified Pygments style.
"""
if isinstance(style, basestring):
style = get_style_by_name(style)
self._style = style
self._clear_caches()
def set_style_sheet(self, stylesheet):
""" Sets a CSS stylesheet. The classes in the stylesheet should
correspond to those generated by:
pygmentize -S <style> -f html
Note that 'set_style' and 'set_style_sheet' completely override each
other, i.e. they cannot be used in conjunction.
"""
self._document.setDefaultStyleSheet(stylesheet)
self._style = None
self._clear_caches()
#---------------------------------------------------------------------------
# Protected interface
#---------------------------------------------------------------------------
def _clear_caches(self):
""" Clear caches for brushes and formats.
"""
self._brushes = {}
self._formats = {}
def _get_format(self, token):
""" Returns a QTextCharFormat for token or None.
"""
if token in self._formats:
return self._formats[token]
if self._style is None:
result = self._get_format_from_document(token, self._document)
else:
result = self._get_format_from_style(token, self._style)
self._formats[token] = result
return result
def _get_format_from_document(self, token, document):
""" Returns a QTextCharFormat for token by
"""
code, html = self._formatter._format_lines([(token, 'dummy')]).next()
self._document.setHtml(html)
return QtGui.QTextCursor(self._document).charFormat()
def _get_format_from_style(self, token, style):
""" Returns a QTextCharFormat for token by reading a Pygments style.
"""
result = QtGui.QTextCharFormat()
for key, value in style.style_for_token(token).items():
if value:
if key == 'color':
result.setForeground(self._get_brush(value))
elif key == 'bgcolor':
result.setBackground(self._get_brush(value))
elif key == 'bold':
result.setFontWeight(QtGui.QFont.Bold)
elif key == 'italic':
result.setFontItalic(True)
elif key == 'underline':
result.setUnderlineStyle(
QtGui.QTextCharFormat.SingleUnderline)
elif key == 'sans':
result.setFontStyleHint(QtGui.QFont.SansSerif)
elif key == 'roman':
result.setFontStyleHint(QtGui.QFont.Times)
elif key == 'mono':
result.setFontStyleHint(QtGui.QFont.TypeWriter)
return result
def _get_brush(self, color):
""" Returns a brush for the color.
"""
result = self._brushes.get(color)
if result is None:
qcolor = self._get_color(color)
result = QtGui.QBrush(qcolor)
self._brushes[color] = result
return result
def _get_color(self, color):
""" Returns a QColor built from a Pygments color string.
"""
qcolor = QtGui.QColor()
qcolor.setRgb(int(color[:2], base=16),
int(color[2:4], base=16),
int(color[4:6], base=16))
return qcolor
| 36.786667 | 80 | 0.524224 |
5f064f42bb9bfc71bbe42420c6f467701cdaa847 | 1,639 | py | Python | cvl/merge_dense_infos.py | MartinHahner/SeeingThroughFog | 3b568af0ad190c9f4cf6a4a826f69332844bff99 | [
"MIT"
] | 4 | 2021-08-16T03:56:22.000Z | 2021-11-11T13:29:31.000Z | cvl/merge_dense_infos.py | MartinHahner/SeeingThroughFog | 3b568af0ad190c9f4cf6a4a826f69332844bff99 | [
"MIT"
] | null | null | null | cvl/merge_dense_infos.py | MartinHahner/SeeingThroughFog | 3b568af0ad190c9f4cf6a4a826f69332844bff99 | [
"MIT"
] | 2 | 2022-01-12T13:42:05.000Z | 2022-01-19T04:45:05.000Z | import os
import pickle
from pathlib import Path
dir_path = Path(os.path.dirname(os.path.realpath(__file__)))
pkl_dir = dir_path.parent.parent.parent / 'data' / 'dense'
save_file = f'{pkl_dir}/dense_dbinfos_train_clear.pkl'
day_file = f'{pkl_dir}/dense_dbinfos_train_clear_day.pkl'
night_file = f'{pkl_dir}/dense_dbinfos_train_clear_night.pkl'
with open(str(day_file), 'rb') as df:
day_dict = pickle.load(df)
with open(str(night_file), 'rb') as nf:
night_dict = pickle.load(nf)
save_dict = {}
for key in day_dict:
save_dict[key] = day_dict[key] + night_dict[key]
with open(save_file, 'wb') as f:
pickle.dump(save_dict, f)
for stage in ['train', 'val', 'trainval']:
save_file = f'{pkl_dir}/dense_infos_{stage}_clear.pkl'
day_file = f'{pkl_dir}/dense_infos_{stage}_clear_day.pkl'
night_file = f'{pkl_dir}/dense_infos_{stage}_clear_night.pkl'
with open(str(day_file), 'rb') as df:
day_infos = pickle.load(df)
with open(str(night_file), 'rb') as nf:
night_infos = pickle.load(nf)
with open(save_file, 'wb') as f:
pickle.dump(day_infos + night_infos, f)
for condition in ['clear', 'light_fog', 'dense_fog', 'snow']:
save_file = f'{pkl_dir}/dense_infos_test_{condition}.pkl'
day_file = f'{pkl_dir}/dense_infos_test_{condition}_day.pkl'
night_file = f'{pkl_dir}/dense_infos_test_{condition}_night.pkl'
with open(str(day_file), 'rb') as df:
day_infos = pickle.load(df)
with open(str(night_file), 'rb') as nf:
night_infos = pickle.load(nf)
with open(save_file, 'wb') as f:
pickle.dump(day_infos + night_infos, f) | 26.868852 | 68 | 0.685784 |
4edb1dc6b18c709422f328b3e35edf46c0580a2f | 6,127 | py | Python | shs/input/frames/extra_panel.py | ansobolev/shs | 7a5f61bd66fe1e8ae047a4d3400b055175a53f4e | [
"MIT"
] | 1 | 2016-06-22T13:30:25.000Z | 2016-06-22T13:30:25.000Z | shs/input/frames/extra_panel.py | ansobolev/shs | 7a5f61bd66fe1e8ae047a4d3400b055175a53f4e | [
"MIT"
] | 1 | 2017-12-01T04:49:45.000Z | 2017-12-01T04:49:45.000Z | shs/input/frames/extra_panel.py | ansobolev/shs | 7a5f61bd66fe1e8ae047a4d3400b055175a53f4e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import wx
from shs.input.dialogs.add_block import AddBlockDlg
#import fdf_base as fdf
import wx.lib.agw.ultimatelistctrl as ulc
'A class collection representing the extra options panel'
class ExtraPN(wx.ScrolledWindow):
blocks = {}
def __init__(self, *args, **kwds):
wx.ScrolledWindow.__init__(self, *args, **kwds)
self.OptL = wx.StaticText(self, -1, 'Option')
self.ValL = wx.StaticText(self, -1, 'Value')
self.OptTE = wx.TextCtrl(self, -1)
self.ValTE = wx.TextCtrl(self, -1)
self.ExtraList = ulc.UltimateListCtrl(self, -1, agwStyle=wx.LC_REPORT | wx.LC_VRULES | wx.LC_HRULES |
ulc.ULC_HAS_VARIABLE_ROW_HEIGHT)
self.AddBtn = wx.Button(self, -1, 'Add')
self.AddBlockBtn = wx.Button(self, -1, 'Add block')
self.RmBtn = wx.Button(self, -1, 'Remove')
self.fdf_opts = {}
#binding events
self.Bind(wx.EVT_BUTTON, self.OnAddBtn, self.AddBtn)
self.Bind(wx.EVT_BUTTON, self.OnAddBlockBtn, self.AddBlockBtn)
self.Bind(wx.EVT_BUTTON, self.OnRmBtn, self.RmBtn)
self.__set_properties()
self.__do_layout()
def OnAddBtn(self, evt):
# add value
if self.OptTE.GetValue() and self.ValTE.GetValue():
ind = self.ExtraList.InsertStringItem(sys.maxint, self.OptTE.GetValue())
self.ExtraList.SetStringItem(ind, 1, self.ValTE.GetValue())
self.OptTE.SetValue('')
self.ValTE.SetValue('')
def OnRmBtn(self, evt):
# delete item
for _ in range(self.ExtraList.GetSelectedItemCount()):
row = self.ExtraList.GetFirstSelected()
# delete from blocks if block is being deleted
if self.ExtraList.GetItem(row,0).GetText() in self.blocks.keys():
opt = self.ExtraList.GetItem(row,0).GetText()
# unbind button
self.Unbind(wx.EVT_BUTTON, self.blocks[opt][0])
# destroy button
self.blocks[opt][0].Destroy()
# pop from blocks
self.blocks.pop(opt)
self.ExtraList.DeleteItem(row)
def OnAddBlockBtn(self, evt):
# add block
dlg = AddBlockDlg(self, def_opt = self.OptTE.GetValue())
if dlg.ShowModal() == wx.ID_OK:
opt, val = dlg.GetBlock()
self.add_block(opt, val)
dlg.Destroy()
def OnShowBtn(self, evt):
for old_opt, [btn, old_val] in self.blocks.iteritems():
btn_id = btn.GetId()
if btn_id == evt.GetId():
# allow to change option and value via dlg
dlg = AddBlockDlg(self, def_opt = old_opt, def_val = old_val)
if dlg.ShowModal() == wx.ID_OK:
# delete old_opt
self.blocks.pop(old_opt)
# insert new_opt
new_opt, new_val = dlg.GetBlock()
self.blocks[new_opt] = [btn, new_val]
# change value in list
item, = [i for i in range(self.ExtraList.GetItemCount()) if self.ExtraList.GetItem(i, 0).GetText() == old_opt]
self.ExtraList.SetStringItem(item, 0, new_opt)
dlg.Destroy()
break
def add_block(self, opt, val):
# 'show' button
show_btn = wx.Button(self.ExtraList, -1, 'Show')
# add to blocks
self.blocks[opt] = [show_btn, val]
# add to list
ind = self.ExtraList.InsertStringItem(sys.maxint, opt)
self.ExtraList.SetItemWindow(ind, 1, show_btn, expand=True)
# bind show_btn
self.Bind(wx.EVT_BUTTON, self.OnShowBtn, show_btn)
def populate(self, d):
'Populates extras pane with values from fdf dictionary'
for key, t_val in d.iteritems():
try:
if t_val.__class__.__name__ == 'BlockValue':
self.add_block(key, '\n'.join([' '.join(s) for s in t_val.value]))
else:
ind = self.ExtraList.InsertStringItem(sys.maxint, key)
self.ExtraList.SetStringItem(ind, 1, str(t_val))
except IndexError:
continue
def extract(self):
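        # Serialize the pane back to fdf text: plain options are written as a
        # left-padded key, a tab and the value, while block options are wrapped
        # in "%block <name> ... %endblock <name>" sections.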
s = ""
items = []
for i in range(self.ExtraList.GetItemCount()):
items.append([self.ExtraList.GetItem(itemOrId=i, col=j).GetText() for j in range(2)])
for k, v in items:
if k in self.blocks:
s += ("%block {0}\n"
" {1}\n"
"%endblock {0}\n").format(k, self.blocks[k][1])
else:
s += "{0:<25}\t{1}\n".format(k, v)
return s
def __set_properties(self):
self.SetScrollRate(0, 10)
self.ExtraList.InsertColumn(0, 'Option', width = 250)
self.ExtraList.InsertColumn(1, 'Value', width = 400)
def __do_layout(self):
opt_sizer = wx.GridSizer(rows=2, cols=2, vgap=5, hgap=5)
opt_sizer.Add(self.OptL, 0, wx.ALL|wx.ALIGN_CENTER, 0)
opt_sizer.Add(self.ValL, 0, wx.ALL|wx.ALIGN_CENTER, 0)
opt_sizer.Add(self.OptTE, 1, wx.ALL|wx.EXPAND, 0)
opt_sizer.Add(self.ValTE, 1, wx.ALL|wx.EXPAND, 0)
btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
btn_sizer.Add(self.AddBtn, 0, wx.ALL, 5)
btn_sizer.Add(self.AddBlockBtn, 0, wx.ALL, 5)
btn_sizer.Add(self.RmBtn, 0, wx.ALL, 5)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(opt_sizer, 0, wx.ALL|wx.EXPAND, 5)
sizer.Add(btn_sizer, 0, wx.ALL, 0)
sizer.Add(self.ExtraList, 1, wx.ALL|wx.EXPAND, 5)
self.SetSizer(sizer)
self.Layout()
if __name__ == '__main__':
'A simple test'
app = wx.App()
f = wx.Frame(None, -1)
p = ExtraPN(f, -1)
s = wx.BoxSizer(wx.VERTICAL)
s.Add(p, 1, wx.EXPAND, 0)
f.SetSizer(s)
f.Layout()
app.SetTopWindow(f)
f.Show()
app.MainLoop()
| 36.041176 | 130 | 0.5559 |
f7c80f58201e0a3a660e42da363b92892a8b7b27 | 5,196 | py | Python | data/p3BR/R1/benchmark/startQiskit_QC2.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startQiskit_QC2.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startQiskit_QC2.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=4
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
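    # Sketch of the construction (illustrative, n = 2): for every bitstring x with
    # f(x) == "1", X gates map |x> onto |11>, the multi-controlled Toffoli flips
    # the target, and the X gates are undone -- so the target is XORed with f(x).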
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_QC2.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 28.549451 | 140 | 0.627213 |
1cbf3b0f4b43124d1e902ac76d5b7bedacd8e8b1 | 3,202 | py | Python | src/common/api_covalent.py | wengzilla/staketaxcsv | c52210106ec8194973f6ff178a307c77699e094e | [
"MIT"
] | 140 | 2021-12-11T23:37:46.000Z | 2022-03-29T23:04:36.000Z | src/common/api_covalent.py | wengzilla/staketaxcsv | c52210106ec8194973f6ff178a307c77699e094e | [
"MIT"
] | 80 | 2021-12-17T15:13:47.000Z | 2022-03-31T13:33:53.000Z | src/common/api_covalent.py | wengzilla/staketaxcsv | c52210106ec8194973f6ff178a307c77699e094e | [
"MIT"
] | 52 | 2021-12-12T00:37:17.000Z | 2022-03-29T23:25:09.000Z | import logging
import requests
from common.Singleton import Singleton
from settings_csv import COVALENT_API_KEY, COVALENT_NODE
# Documentation: https://www.covalenthq.com/docs/developer/
class CovalentAPI(metaclass=Singleton):
def __init__(self, chain_id):
if not COVALENT_API_KEY:
raise TypeError("Empty API_KEY")
self._session = requests.Session()
self._session.auth = ('', COVALENT_API_KEY)
self._chain_id = chain_id
def get_transactions(self, address, block_signed_at_asc=False, no_logs=False,
page_number=None, page_size=None):
"""
Retrieve all transactions for address including their decoded log events.
This endpoint does a deep-crawl of the blockchain to retrieve all kinds
of transactions that references the address.
:param str address: Passing in an ENS resolves automatically.
:param bool block_signed_at_asc: Sort the transactions in chronological
ascending order. By default it's set to false and returns transactions
in chronological descending order.
:param bool no_logs: Setting this to true will omit decoded event logs,
resulting in lighter and faster responses. By default it's set to false.
:param int page_number: The specific page to be returned.
:param int page_size: The number of results per page.
"""
endpoint = f"v1/{self._chain_id}/address/{address}/transactions_v2"
params = {
"block-signed-at-asc": block_signed_at_asc,
"no-logs": no_logs,
"page-number": page_number,
"page-size": page_size,
}
data, status_code = self._query(endpoint, params)
if status_code == 200:
data_obj = data.get("data", {})
has_more = (data_obj["pagination"].get("has_more", False)
if "pagination" in data_obj and data_obj["pagination"] else False)
return data_obj.get("items", []), has_more
else:
return None
def get_transaction(self, txhash, no_logs=False):
"""
Retrieve the transaction data with their decoded event logs.
:param str txhash: Transaction hash.
:param bool no_logs: Setting this to true will omit decoded event logs,
resulting in lighter and faster responses. By default it's set to false.
"""
endpoint = f"v1/{self._chain_id}/transaction_v2/{txhash}"
params = {
"no-logs": no_logs,
}
data, status_code = self._query(endpoint, params)
if status_code == 200:
return data.get("data", {}).get("items", [])
else:
return None
def _query(self, endpoint, params=None):
url = f"{COVALENT_NODE}/{endpoint}"
logging.info("Querying Covalent endpoint %s...", url)
response = self._session.get(url, params=params)
response_json = response.json()
if not response.ok:
logging.error("Error querying Covalent endpoint %s: %s",
url, response_json.get("error_message", "unknown"))
return response_json, response.status_code
| 39.530864 | 84 | 0.637726 |
d03debe5557f3fefecf66f0c60e60cc74459c11b | 1,314 | py | Python | tests/unit/test_external_scripts.py | davidjsherman/repo2docker | 4da2e1e71565b26a4bf1d0f0d26ae7c2373a1fd7 | [
"BSD-3-Clause"
] | 1,047 | 2017-05-25T03:37:21.000Z | 2020-08-09T19:36:56.000Z | tests/unit/test_external_scripts.py | davidjsherman/repo2docker | 4da2e1e71565b26a4bf1d0f0d26ae7c2373a1fd7 | [
"BSD-3-Clause"
] | 810 | 2017-05-24T20:50:49.000Z | 2020-08-05T15:56:38.000Z | tests/unit/test_external_scripts.py | davidjsherman/repo2docker | 4da2e1e71565b26a4bf1d0f0d26ae7c2373a1fd7 | [
"BSD-3-Clause"
] | 253 | 2017-06-02T20:23:05.000Z | 2020-08-04T17:23:22.000Z | """Test if assemble scripts from outside of r2d repo are accepted."""
import time
from repo2docker.app import Repo2Docker
from repo2docker.buildpacks import PythonBuildPack
def test_Repo2Docker_external_build_scripts(tmpdir):
tempfile = tmpdir.join("absolute-script")
tempfile.write("Hello World of Absolute Paths!")
class MockBuildPack(PythonBuildPack):
def detect(self):
return True
def get_build_script_files(self):
files = {str(tempfile): "/tmp/my_extra_script"}
files.update(super().get_build_script_files())
return files
app = Repo2Docker(repo=str(tmpdir))
app.buildpacks = [MockBuildPack]
app.initialize()
app.build()
container = app.start_container()
# give the container a chance to start
tic = 180
    while container.status != "running" and tic > 0:
        time.sleep(1)
        tic -= 1
        container.reload()
assert container.status == "running"
try:
status, output = container._c.exec_run(["sh", "-c", "cat /tmp/my_extra_script"])
assert status == 0
assert output.decode("utf-8") == "Hello World of Absolute Paths!"
finally:
container.stop(timeout=1)
container.reload()
assert container.status == "exited", container.status
container.remove()
| 30.55814 | 88 | 0.653729 |
7f553aca99a45abdb6da1d9d9546eca3fcd4a465 | 1,163 | py | Python | datavalidation/libraries/meta/wrrapers.py | lifus/data-validation-benchmark | fca70230da3cb9e60ac4014b4a4c7c77b78027da | [
"Apache-2.0"
] | null | null | null | datavalidation/libraries/meta/wrrapers.py | lifus/data-validation-benchmark | fca70230da3cb9e60ac4014b4a4c7c77b78027da | [
"Apache-2.0"
] | null | null | null | datavalidation/libraries/meta/wrrapers.py | lifus/data-validation-benchmark | fca70230da3cb9e60ac4014b4a4c7c77b78027da | [
"Apache-2.0"
] | null | null | null | import os
import importlib
import collections
"""
Searches for libraries in the `datavalidation.libraries` module.
Expects `Wrapper` and `VALIDATORS` to be defined in each library's package.
Stores wrappers for the libraries for later invocation.
"""
def __package_path():
return os.path.dirname(
os.path.dirname(
os.path.abspath(
__file__
)
)
)
def __package_children():
return os.listdir(
__package_path()
)
def __get_libraries_names_wrappers_and_validators():
for name in __package_children():
try:
library = importlib.import_module(
'datavalidation.libraries.{}'.format(name)
)
yield name, library.Wrapper, library.VALIDATORS
except (AttributeError, ImportError):
pass
def __get_wrappers():
wrappers = collections.defaultdict(dict)
for name, wrapper, validators in __get_libraries_names_wrappers_and_validators():
for validator_type, validator in validators.items():
wrappers[validator_type][name] = wrapper(validator)
return wrappers
ALL = __get_wrappers()
| 22.803922 | 85 | 0.66466 |
f900ba33f451bcba8dc12d20d62a59b65c4e0cfc | 1,169 | py | Python | pycon/finaid/migrations/0005_auto_20150827_1147.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 154 | 2015-01-17T02:29:24.000Z | 2022-03-20T20:37:24.000Z | pycon/finaid/migrations/0005_auto_20150827_1147.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 316 | 2015-01-10T04:01:50.000Z | 2020-09-30T20:18:08.000Z | pycon/finaid/migrations/0005_auto_20150827_1147.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 89 | 2015-01-10T05:25:21.000Z | 2022-02-27T03:28:59.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import now
def forward(apps, schema_editor):
FinancialAidReviewData = apps.get_model('finaid', 'FinancialAidReviewData')
# Where when_grant_letter_sent is not-null, we want grant_letter_sent to be True
FinancialAidReviewData.objects.exclude(when_grant_letter_sent=None).update(grant_letter_sent=True)
FinancialAidReviewData.objects.filter(when_grant_letter_sent=None).update(grant_letter_sent=False)
def backward(apps, schema_editor):
FinancialAidReviewData = apps.get_model('finaid', 'FinancialAidReviewData')
# Where grant_letter_sent is True and when_grant_letter_sent is None,
# set when_grant_letter_sent to current timestamp for lack of an alternative
FinancialAidReviewData.objects.filter(grant_letter_sent=True, when_grant_letter_sent=None).update(when_grant_letter_sent=now())
class Migration(migrations.Migration):
dependencies = [
('finaid', '0004_financialaidreviewdata_grant_letter_sent'),
]
operations = [
migrations.RunPython(forward, backward)
]
| 35.424242 | 131 | 0.78272 |
9d9ed59583a450b255724af2a79e262c4749e647 | 4,067 | py | Python | tensorflow_io/core/python/experimental/azure_ops.py | burgerkingeater/io | f2de208f474d6ba4926e2c7f9e901e102ca5c254 | [
"Apache-2.0"
] | null | null | null | tensorflow_io/core/python/experimental/azure_ops.py | burgerkingeater/io | f2de208f474d6ba4926e2c7f9e901e102ca5c254 | [
"Apache-2.0"
] | null | null | null | tensorflow_io/core/python/experimental/azure_ops.py | burgerkingeater/io | f2de208f474d6ba4926e2c7f9e901e102ca5c254 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensorflow-io azure file system import"""
import tensorflow_io.core.python.ops # pylint: disable=unused-import
def authenticate_with_device_code(account_name):
"""Setup storage tokens by authenticating with device code
and use management APIs
Args:
account_name (str): The storage account name for which to authenticate
"""
import urllib # pylint: disable=import-outside-toplevel
import json # pylint: disable=import-outside-toplevel
import os # pylint: disable=import-outside-toplevel
from tensorflow.python.platform import ( # pylint: disable=import-outside-toplevel
tf_logging as log,
)
try:
from adal import ( # pylint: disable=import-outside-toplevel
AuthenticationContext,
)
except ModuleNotFoundError:
log.error(
"Please install adal library with `python -m pip install -U adal`"
"to use the device code authentication method"
)
return
ctx = AuthenticationContext("https://login.microsoftonline.com/common")
storage_resource = "https://management.azure.com/"
    # Current multi-tenant client registered in my AzureAD tenant
client_id = "8c375311-7f4c-406c-84f8-03dfe11ba2d3"
device_code = ctx.acquire_user_code(resource=storage_resource, client_id=client_id)
# Display authentication message to user to action in their browser
log.warn(device_code["message"])
token_response = ctx.acquire_token_with_device_code(
resource=storage_resource, user_code_info=device_code, client_id=client_id
)
headers = {"Authorization": "Bearer " + token_response["accessToken"]}
subscription_list_req = urllib.request.Request(
url="https://management.azure.com/subscriptions?api-version=2016-06-01",
headers=headers,
)
with urllib.request.urlopen(subscription_list_req) as f:
subscriptions = json.load(f)
subscriptions = subscriptions["value"]
storage_account = None
for subscription in subscriptions:
url = "https://management.azure.com/subscriptions/{}/providers/Microsoft.Storage/storageAccounts?api-version=2019-04-01".format(
subscription["subscriptionId"]
)
storage_account_list_req = urllib.request.Request(url=url, headers=headers)
with urllib.request.urlopen(storage_account_list_req) as f:
storage_accounts = json.load(f)
storage_accounts = storage_accounts["value"]
account_by_name = [s for s in storage_accounts if s.get("name") == account_name]
if any(account_by_name):
storage_account = account_by_name[0]
break
if storage_account is None:
log.error(
"Couldn't find storage account {} in any "
"available subscription".format(account_name)
)
return
url = "https://management.azure.com/{}/listKeys?api-version=2019-04-01".format(
storage_account["id"]
)
storage_list_keys_req = urllib.request.Request(
url=url, headers=headers, method="POST"
)
with urllib.request.urlopen(storage_list_keys_req) as f:
account_keys = json.load(f)
os.environ["TF_AZURE_STORAGE_KEY"] = account_keys["keys"][0]["value"]
log.info(
"Successfully set account key environment for {} "
"storage account".format(account_name)
)
| 36.63964 | 136 | 0.68478 |
d2e50c75c865d23cc31e512d9faaa748acf73b1a | 171 | py | Python | server/db_repository/manage.py | Lazarus118/virtual-queue | fde79ec6e051f372b1054d8d3790bdaf5eb31cc3 | [
"CC-BY-3.0"
] | null | null | null | server/db_repository/manage.py | Lazarus118/virtual-queue | fde79ec6e051f372b1054d8d3790bdaf5eb31cc3 | [
"CC-BY-3.0"
] | null | null | null | server/db_repository/manage.py | Lazarus118/virtual-queue | fde79ec6e051f372b1054d8d3790bdaf5eb31cc3 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
from migrate.versioning.shell import main
if __name__ == '__main__':
    main(six=r"<module 'six' from 'S:\python\venv\lib\site-packages\six.pyc'>")
| 28.5 | 78 | 0.707602 |
71e7559f4d26901d6450278a88d26cf770c1c32c | 4,764 | py | Python | cinder/tests/unit/api/contrib/test_volume_tenant_attribute.py | tlakshman26/cinder-new-branch | 80ba191522d1756c1871a104fbdb91008cffaaa3 | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/api/contrib/test_volume_tenant_attribute.py | tlakshman26/cinder-new-branch | 80ba191522d1756c1871a104fbdb91008cffaaa3 | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/api/contrib/test_volume_tenant_attribute.py | tlakshman26/cinder-new-branch | 80ba191522d1756c1871a104fbdb91008cffaaa3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from lxml import etree
import webob
from cinder import context
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_volume
from cinder import volume
PROJECT_ID = '88fd1da4-f464-4a87-9ce5-26f2f40743b9'
def fake_volume_get(*args, **kwargs):
ctx = context.RequestContext('non-admin', 'fake', False)
vol = {
'id': 'fake',
'project_id': PROJECT_ID,
}
return fake_volume.fake_volume_obj(ctx, **vol)
def fake_volume_get_all(*args, **kwargs):
return objects.VolumeList(objects=[fake_volume_get()])
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
class VolumeTenantAttributeTest(test.TestCase):
def setUp(self):
super(VolumeTenantAttributeTest, self).setUp()
self.stubs.Set(volume.API, 'get', fake_volume_get)
self.stubs.Set(volume.API, 'get_all', fake_volume_get_all)
self.UUID = uuid.uuid4()
def test_get_volume_allowed(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volume']
self.assertEqual(vol['os-vol-tenant-attr:tenant_id'], PROJECT_ID)
def test_get_volume_unallowed(self):
ctx = context.RequestContext('non-admin', 'fake', False)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volume']
self.assertNotIn('os-vol-tenant-attr:tenant_id', vol)
def test_list_detail_volumes_allowed(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volumes']
self.assertEqual(vol[0]['os-vol-tenant-attr:tenant_id'], PROJECT_ID)
def test_list_detail_volumes_unallowed(self):
ctx = context.RequestContext('non-admin', 'fake', False)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volumes']
self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0])
def test_list_simple_volumes_no_tenant_id(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volumes']
self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0])
def test_get_volume_xml(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.accept = 'application/xml'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = etree.XML(res.body)
tenant_key = ('{http://docs.openstack.org/volume/ext/'
'volume_tenant_attribute/api/v2}tenant_id')
self.assertEqual(vol.get(tenant_key), PROJECT_ID)
def test_list_volumes_detail_xml(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.accept = 'application/xml'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = list(etree.XML(res.body))[0]
tenant_key = ('{http://docs.openstack.org/volume/ext/'
'volume_tenant_attribute/api/v2}tenant_id')
self.assertEqual(vol.get(tenant_key), PROJECT_ID)
| 36.930233 | 77 | 0.652813 |
f36e37e3e6683581056551db4cdac09ea6857ae5 | 4,664 | py | Python | src/pew/pythonista/webview.py | dylanmccall/pyeverywhere | 2b23b4a70d5b7b958716596688d9989afa024e8d | [
"Apache-2.0"
] | 7 | 2016-06-08T22:18:44.000Z | 2022-01-16T16:53:33.000Z | src/pew/pythonista/webview.py | dylanmccall/pyeverywhere | 2b23b4a70d5b7b958716596688d9989afa024e8d | [
"Apache-2.0"
] | 21 | 2015-09-02T19:20:16.000Z | 2021-10-24T00:44:29.000Z | src/pew/pythonista/webview.py | dylanmccall/pyeverywhere | 2b23b4a70d5b7b958716596688d9989afa024e8d | [
"Apache-2.0"
] | 12 | 2018-04-24T02:54:33.000Z | 2021-09-13T09:35:22.000Z | import logging
import console
import ui
from objc_util import *
NSURLCache = ObjCClass('NSURLCache')
NSURLRequest = ObjCClass('NSURLRequest')
NSUserDefaults = ObjCClass('NSUserDefaults')
UIColor = ObjCClass('UIColor')
WKWebView = ObjCClass('WKWebView')
WKWebViewConfiguration = ObjCClass('WKWebViewConfiguration')
USE_WKWEBKIT = False
@ui.in_background
def show_alert(title, message=""):
console.alert(title, message)
class PEWThread(object):
"""
    PEWThread is a stand-in for the Python threading.Thread object that allows thread
    code to work with some native platforms that require additional handling when
    interacting with the GUI. The API for PEWThread mimics threading.Thread exactly,
    so please refer to that for API documentation.
"""
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
self.target = target
self.args = args
self.kwargs = kwargs
def start(self):
self.run()
@ui.in_background
def run(self):
self.target(*self.args, **self.kwargs)
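# Illustrative usage sketch (added for clarity, not part of the original module):
# PEWThread mirrors the threading.Thread constructor, so a background task is
# spawned the same way. The _fetch helper and URL below are hypothetical.
def _example_background_task():
    def _fetch(url):
        print("fetching", url)

    task = PEWThread(target=_fetch, args=("https://example.com",))
    task.start()
    return task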
class NativeWebView(object):
def __init__(self, name="WebView", size=None):
self.view = ui.View()
self.view.name = name
self.view.background_color = 'black'
if USE_WKWEBKIT:
self.nativeView = ObjCInstance(self.view._objc_ptr)
self.config = WKWebViewConfiguration.new().autorelease()
self.config.requiresUserActionForMediaPlayback = False
self.webview = WKWebView.alloc().initWithFrame_configuration_(self.nativeView.bounds(), self.config)
# self.webview = WKWebView.new().autorelease()
flex_width, flex_height = (1 << 1), (1 << 4)
self.webview.setAutoresizingMask_(flex_width | flex_height)
self.nativeView.addSubview_(self.webview)
else:
cache = NSURLCache.alloc().initWithMemoryCapacity_diskCapacity_diskPath_(0, 0, None)
NSURLCache.setSharedURLCache_(cache)
NSUserDefaults.standardUserDefaults().setInteger_forKey_(0, "WebKitCacheModelPreferenceKey")
NSUserDefaults.standardUserDefaults().synchronize()
# NSUserDefaults.standardUserDefaults().setBool_forKey_(False, "WebKitDiskImageCacheEnabled")
# // [[NSUserDefaults standardUserDefaults] setBool:NO forKey:@"WebKitOfflineWebApplicationCacheEnabled"];
self.webview = ui.WebView()
self.nativeView = ObjCInstance(self.webview).webView()
self.nativeView.setMediaPlaybackRequiresUserAction_(False)
self.nativeView.backgroundColor = UIColor.colorWithRed_green_blue_alpha_(0.0, 0.0, 0.0, 1.0)
self.nativeView.setOpaque_(False)
self.webview.delegate = self
self.webview.flex = 'WH'
self.view.add_subview(self.webview)
def show(self):
self.view.present('fullscreen', hide_title_bar=True)
@on_main_thread
def load_url(self, url):
if USE_WKWEBKIT:
if url.lower().startswith("file://"):
# Sadly, this does not appear to work. I'm leaving the code in place
# in case someone can figure this out, but so far all the googling
# I've done suggests loadFileURL is at least somewhat broken.
urldir = url
lastslash = url.rfind('/')
lastpart = url[lastslash:]
if len(lastpart) > 0 and lastpart.find(".") != -1:
urldir = url[:lastslash]
self.webview.loadFileURL_allowingReadAccessToURL_(nsurl(url), nsurl(urldir))
else:
self.webview.loadRequest_(NSURLRequest.requestWithURL_(nsurl(url)))
else:
self.webview.load_url(url)
def reload(self):
self.webview.reload()
def get_user_agent(self):
return ""
def set_user_agent(self, user_agent):
pass
def evaluate_javascript(self, js):
if USE_WKWEBKIT:
self.webview.evaluateJavaScript_completionHandler_(js, 0)
else:
self.webview.evaluate_javascript(js)
def webview_should_start_load(self, webview, url, nav_type):
#self.evaluate_javascript("$('#search_bar').val('%s');" % url)
return self.webview_should_start_load(self, url, nav_type)
def webview_did_start_load(self, webview):
return self.webview_did_start_load(self)
def webview_did_finish_load(self, webview):
return self.webview_did_finish_load(self)
def webview_did_fail_load(self, webview, error_code, error_msg):
return self.webview_did_fail_load(self, error_code, error_msg)
| 37.918699 | 121 | 0.661878 |
a456a31206cc3dc35f20b96d699e4213ec16cd0f | 1,942 | py | Python | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/coding-problems-master/Linked_Lists/max_difference_subll.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 5 | 2021-06-02T23:44:25.000Z | 2021-12-27T16:21:57.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/coding-problems-master/Linked_Lists/max_difference_subll.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 22 | 2021-05-31T01:33:25.000Z | 2021-10-18T18:32:39.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/coding-problems-master/Linked_Lists/max_difference_subll.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 3 | 2021-06-19T03:37:47.000Z | 2021-08-31T00:49:51.000Z | """
Maximum Difference Sub-Linked List
Given a linked list of integers, find and return the sub-linked list of k consecutive elements where
the difference between the smallest element and the largest element is the largest possible.
If there are several sub-linked lists of k elements in items so that all these sub-linked list have
the same largest possible difference, return the sub-linked list that occurs first.
Input: 42 -> 17 -> 99 -> 12 -> 65 -> 77 -> 11 -> 26, 5
Output: 99 -> 12 -> 65 -> 77 -> 11
=========================================
Using 2 pointers (start and end), traverse the linked list and compare the results.
But first, move the end pointer for k places.
Time Complexity: O(N)
Space Complexity: O(1)
"""
############
# Solution #
############
# import ListNode class from ll_helpers.py
from ll_helpers import ListNode
def max_diference_subll(ll, k):
if ll is None:
return None
start, end = ll, ll
# move the end pointer for k-1 places
for i in range(1, k):
end = end.next
if end is None:
return None
result_start, result_end = start, end
while end is not None:
# compare the result with the current sub-linked list
if abs(result_start.val - result_end.val) < abs(start.val - end.val):
result_start, result_end = start, end
# move the both pointers
start = start.next
end = end.next
# cut the original linked list
result_end.next = None
return result_start
###########
# Testing #
###########
# import build_ll and print_ll methods from ll_helpers.py
from ll_helpers import build_ll, print_ll
# Test 1
# Correct result => 99 -> 12 -> 65 -> 77 -> 11
print_ll(max_diference_subll(build_ll([42, 17, 99, 12, 65, 77, 11, 26]), 5))
# Test 2
# Correct result => 14 -> 58 -> 11 -> 63 -> 77
print_ll(max_diference_subll(build_ll([36, 14, 58, 11, 63, 77, 46, 32, 87]), 5))
| 27.742857 | 100 | 0.636972 |
a94cd8cfe35c5f03365cfdd83cb4345207fd7f0b | 3,644 | py | Python | sdk/python/pulumi_azure_native/machinelearningservices/list_machine_learning_compute_nodes.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/machinelearningservices/list_machine_learning_compute_nodes.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/machinelearningservices/list_machine_learning_compute_nodes.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'ListMachineLearningComputeNodesResult',
'AwaitableListMachineLearningComputeNodesResult',
'list_machine_learning_compute_nodes',
]
@pulumi.output_type
class ListMachineLearningComputeNodesResult:
"""
    Compute node information related to an AmlCompute.
"""
def __init__(__self__, compute_type=None, next_link=None, nodes=None):
if compute_type and not isinstance(compute_type, str):
raise TypeError("Expected argument 'compute_type' to be a str")
pulumi.set(__self__, "compute_type", compute_type)
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if nodes and not isinstance(nodes, list):
raise TypeError("Expected argument 'nodes' to be a list")
pulumi.set(__self__, "nodes", nodes)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> str:
"""
The type of compute
Expected value is 'AmlCompute'.
"""
return pulumi.get(self, "compute_type")
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> str:
"""
The continuation token.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def nodes(self) -> Sequence['outputs.AmlComputeNodeInformationResponseResult']:
"""
The collection of returned AmlCompute nodes details.
"""
return pulumi.get(self, "nodes")
class AwaitableListMachineLearningComputeNodesResult(ListMachineLearningComputeNodesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListMachineLearningComputeNodesResult(
compute_type=self.compute_type,
next_link=self.next_link,
nodes=self.nodes)
def list_machine_learning_compute_nodes(compute_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListMachineLearningComputeNodesResult:
"""
    Compute node information related to an AmlCompute.
API Version: 2021-01-01.
:param str compute_name: Name of the Azure Machine Learning compute.
:param str resource_group_name: Name of the resource group in which workspace is located.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['computeName'] = compute_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices:listMachineLearningComputeNodes', __args__, opts=opts, typ=ListMachineLearningComputeNodesResult).value
return AwaitableListMachineLearningComputeNodesResult(
compute_type=__ret__.compute_type,
next_link=__ret__.next_link,
nodes=__ret__.nodes)
| 37.183673 | 177 | 0.679199 |
0067fa657315e6c8f82661af4c3209cb30e10ee2 | 33,002 | py | Python | pysolr.py | aptivate/pysolr | fdc88353e028d8668d389d9dca4003509726904c | [
"BSD-3-Clause"
] | null | null | null | pysolr.py | aptivate/pysolr | fdc88353e028d8668d389d9dca4003509726904c | [
"BSD-3-Clause"
] | null | null | null | pysolr.py | aptivate/pysolr | fdc88353e028d8668d389d9dca4003509726904c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import datetime
import logging
import re
import requests
import time
import types
import ast
try:
# Prefer lxml, if installed.
from lxml import etree as ET
except ImportError:
try:
from xml.etree import cElementTree as ET
except ImportError:
raise ImportError("No suitable ElementTree implementation was found.")
try:
# Prefer simplejson, if installed.
import simplejson as json
except ImportError:
import json
try:
# Python 3.X
from urllib.parse import urlencode
except ImportError:
# Python 2.X
from urllib import urlencode
try:
# Python 3.X
import html.entities as htmlentities
except ImportError:
# Python 2.X
import htmlentitydefs as htmlentities
try:
# Python 2.X
unicode_char = unichr
except NameError:
# Python 3.X
unicode_char = chr
# Ugh.
long = int
__author__ = 'Daniel Lindsley, Joseph Kocherhans, Jacob Kaplan-Moss'
__all__ = ['Solr']
__version__ = (3, 1, 0)
def get_version():
return "%s.%s.%s" % __version__[:3]
DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(\.\d+)?Z$')
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Add the ``NullHandler`` to avoid logging by default while still allowing
# others to attach their own handlers.
LOG = logging.getLogger('pysolr')
h = NullHandler()
LOG.addHandler(h)
# For debugging...
if False:
LOG.setLevel(logging.DEBUG)
stream = logging.StreamHandler()
LOG.addHandler(stream)
def is_py3():
try:
basestring
return False
except NameError:
return True
IS_PY3 = is_py3()
def force_unicode(value):
"""
Forces a bytestring to become a Unicode string.
"""
if IS_PY3:
# Python 3.X
if isinstance(value, bytes):
value = value.decode('utf-8', errors='replace')
elif not isinstance(value, str):
value = str(value)
else:
# Python 2.X
if isinstance(value, str):
value = value.decode('utf-8', 'replace')
elif not isinstance(value, basestring):
value = unicode(value)
return value
def force_bytes(value):
"""
Forces a Unicode string to become a bytestring.
"""
if IS_PY3:
if isinstance(value, str):
value = value.encode('utf-8', 'backslashreplace')
else:
if isinstance(value, unicode):
value = value.encode('utf-8')
return value
def unescape_html(text):
"""
Removes HTML or XML character references and entities from a text string.
@param text The HTML (or XML) source text.
@return The plain text, as a Unicode string, if necessary.
Source: http://effbot.org/zone/re-sub.htm#unescape-html
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unicode_char(int(text[3:-1], 16))
else:
return unicode_char(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unicode_char(htmlentities.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
def safe_urlencode(params, doseq=0):
"""
    UTF-8-safe version of urlencode.
    The stdlib urlencode prior to Python 3.x chokes on UTF-8 values
    which can't fall back to ascii.
"""
if IS_PY3:
return urlencode(params, doseq)
if hasattr(params, "items"):
params = params.items()
new_params = list()
for k, v in params:
k = k.encode("utf-8")
if isinstance(v, (list, tuple)):
new_params.append((k, [force_bytes(i) for i in v]))
else:
new_params.append((k, force_bytes(v)))
return urlencode(new_params, doseq)
def is_valid_xml_char_ordinal(i):
"""
Defines whether char is valid to use in xml document
XML standard defines a valid char as::
Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
"""
return ( # conditions ordered by presumed frequency
0x20 <= i <= 0xD7FF
or i in (0x9, 0xA, 0xD)
or 0xE000 <= i <= 0xFFFD
or 0x10000 <= i <= 0x10FFFF
)
def clean_xml_string(s):
"""
Cleans string from invalid xml chars
Solution was found there::
http://stackoverflow.com/questions/8733233/filtering-out-certain-bytes-in-python
"""
return ''.join(c for c in s if is_valid_xml_char_ordinal(ord(c)))
class SolrError(Exception):
pass
class Results(object):
def __init__(self, docs, hits, highlighting=None, facets=None,
spellcheck=None, stats=None, qtime=None, debug=None,
grouped=None):
self.docs = docs
self.hits = hits
self.highlighting = highlighting or {}
self.facets = facets or {}
self.spellcheck = spellcheck or {}
self.stats = stats or {}
self.qtime = qtime
self.debug = debug or {}
self.grouped = grouped or {}
def __len__(self):
return len(self.docs)
def __iter__(self):
return iter(self.docs)
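# Illustrative usage sketch (added for clarity, not part of the original module):
# Results is what Solr.search() below returns, so typical consumption looks like
# this hypothetical helper; the query and field names are placeholder assumptions.
def _example_print_titles(solr):
    results = solr.search('title:banana', rows=10)
    print("hits:", results.hits, "qtime:", results.qtime)
    for doc in results:
        print(doc.get('title'))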
class Solr(object):
"""
The main object for working with Solr.
Optionally accepts ``decoder`` for an alternate JSON decoder instance.
Default is ``json.JSONDecoder()``.
Optionally accepts ``timeout`` for wait seconds until giving up on a
request. Default is ``60`` seconds.
Usage::
solr = pysolr.Solr('http://localhost:8983/solr')
# With a 10 second timeout.
solr = pysolr.Solr('http://localhost:8983/solr', timeout=10)
"""
def __init__(self, url, decoder=None, timeout=60):
self.decoder = decoder or json.JSONDecoder()
self.url = url
self.timeout = timeout
self.log = self._get_log()
self.session = requests.Session()
self.session.stream = False
def _get_log(self):
return LOG
def _create_full_url(self, path=''):
if len(path):
return '/'.join([self.url.rstrip('/'), path.lstrip('/')])
# No path? No problem.
return self.url
def _send_request(self, method, path='', body=None, headers=None, files=None):
url = self._create_full_url(path)
method = method.lower()
log_body = body
if headers is None:
headers = {}
if log_body is None:
log_body = ''
elif not isinstance(log_body, str):
log_body = repr(body)
self.log.debug("Starting request to '%s' (%s) with body '%s'...",
url, method, log_body[:10])
start_time = time.time()
try:
requests_method = getattr(self.session, method, 'get')
except AttributeError as err:
raise SolrError("Unable to send HTTP method '{0}.".format(method))
try:
# Everything except the body can be Unicode. The body must be
# encoded to bytes to work properly on Py3.
bytes_body = body
if bytes_body is not None:
bytes_body = force_bytes(body)
if not 'content-type' in [key.lower() for key in headers.keys()]:
headers['Content-type'] = 'application/xml; charset=UTF-8'
resp = requests_method(url, data=bytes_body, headers=headers, files=files,
timeout=self.timeout)
except requests.exceptions.Timeout as err:
error_message = "Connection to server '%s' timed out: %s"
self.log.error(error_message, url, err, exc_info=True)
raise SolrError(error_message % (url, err))
except requests.exceptions.ConnectionError as err:
error_message = "Failed to connect to server at '%s', are you sure that URL is correct? Checking it in a browser might help: %s"
params = (url, err)
self.log.error(error_message, *params, exc_info=True)
raise SolrError(error_message % params)
end_time = time.time()
self.log.info("Finished '%s' (%s) with body '%s' in %0.3f seconds.",
url, method, log_body[:10], end_time - start_time)
if int(resp.status_code) != 200:
error_message = self._extract_error(resp)
self.log.error(error_message, extra={'data': {'headers': resp.headers,
'response': resp.content}})
raise SolrError(error_message)
return force_unicode(resp.content)
def _select(self, params):
# specify json encoding of results
params['wt'] = 'json'
params_encoded = safe_urlencode(params, True)
if len(params_encoded) < 1024:
# Typical case.
path = 'select/?%s' % params_encoded
return self._send_request('get', path)
else:
# Handles very long queries by submitting as a POST.
path = 'select/'
headers = {
'Content-type': 'application/x-www-form-urlencoded; charset=utf-8',
}
return self._send_request('post', path, body=params_encoded, headers=headers)
def _mlt(self, params):
# specify json encoding of results
params['wt'] = 'json'
path = 'mlt/?%s' % safe_urlencode(params, True)
return self._send_request('get', path)
def _suggest_terms(self, params):
# specify json encoding of results
params['wt'] = 'json'
path = 'terms/?%s' % safe_urlencode(params, True)
return self._send_request('get', path)
def _update(self, message, clean_ctrl_chars=True, commit=True, waitFlush=None, waitSearcher=None):
"""
Posts the given xml message to http://<self.url>/update and
returns the result.
        Passing `clean_ctrl_chars` as False will prevent the message from being cleaned
of control characters (default True). This is done by default because
these characters would cause Solr to fail to parse the XML. Only pass
False if you're positive your data is clean.
"""
path = 'update/'
# Per http://wiki.apache.org/solr/UpdateXmlMessages, we can append a
# ``commit=true`` to the URL and have the commit happen without a
# second request.
query_vars = []
if commit is not None:
query_vars.append('commit=%s' % str(bool(commit)).lower())
if waitFlush is not None:
query_vars.append('waitFlush=%s' % str(bool(waitFlush)).lower())
if waitSearcher is not None:
query_vars.append('waitSearcher=%s' % str(bool(waitSearcher)).lower())
if query_vars:
path = '%s?%s' % (path, '&'.join(query_vars))
# Clean the message of ctrl characters.
if clean_ctrl_chars:
message = sanitize(message)
return self._send_request('post', path, message, {'Content-type': 'text/xml; charset=utf-8'})
def _extract_error(self, resp):
"""
Extract the actual error message from a solr response.
"""
reason = resp.headers.get('reason', None)
full_html = None
if reason is None:
reason, full_html = self._scrape_response(resp.headers, resp.content)
msg = "[Reason: %s]" % reason
if reason is None:
msg += "\n%s" % unescape_html(full_html)
return msg
def _scrape_response(self, headers, response):
"""
Scrape the html response.
"""
# identify the responding server
server_type = None
server_string = headers.get('server', '')
if server_string and 'jetty' in server_string.lower():
server_type = 'jetty'
if server_string and 'coyote' in server_string.lower():
import lxml.html
server_type = 'tomcat'
reason = None
full_html = ''
dom_tree = None
if server_type == 'tomcat':
# Tomcat doesn't produce a valid XML response
soup = lxml.html.fromstring(response)
body_node = soup.find('body')
p_nodes = body_node.cssselect('p')
for p_node in p_nodes:
children = p_node.getchildren()
if len(children) >= 2 and 'message' in children[0].text.lower():
reason = children[1].text
if len(children) >= 2 and hasattr(children[0], 'renderContents'):
if 'description' in children[0].renderContents().lower():
if reason is None:
reason = children[1].renderContents()
else:
reason += ", " + children[1].renderContents()
if reason is None:
from lxml.html.clean import clean_html
full_html = clean_html(response)
else:
# Let's assume others do produce a valid XML response
try:
dom_tree = ET.fromstring(response)
reason_node = None
# html page might be different for every server
if server_type == 'jetty':
reason_node = dom_tree.find('body/pre')
else:
reason_node = dom_tree.find('head/title')
if reason_node is not None:
reason = reason_node.text
if reason is None:
full_html = ET.tostring(dom_tree)
except SyntaxError as err:
full_html = "%s" % response
full_html = full_html.replace('\n', '')
full_html = full_html.replace('\r', '')
full_html = full_html.replace('<br/>', '')
full_html = full_html.replace('<br />', '')
full_html = full_html.strip()
return reason, full_html
# Conversion #############################################################
def _from_python(self, value):
"""
Converts python values to a form suitable for insertion into the xml
we send to solr.
"""
if hasattr(value, 'strftime'):
if hasattr(value, 'hour'):
value = "%sZ" % value.isoformat()
else:
value = "%sT00:00:00Z" % value.isoformat()
elif isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
else:
if IS_PY3:
# Python 3.X
if isinstance(value, bytes):
value = str(value, errors='replace')
else:
# Python 2.X
if isinstance(value, str):
value = unicode(value, errors='replace')
value = "{0}".format(value)
return clean_xml_string(value)
def _to_python(self, value):
"""
Converts values from Solr to native Python values.
"""
if isinstance(value, (int, float, long, complex)):
return value
if isinstance(value, (list, tuple)):
value = value[0]
if value == 'true':
return True
elif value == 'false':
return False
is_string = False
if IS_PY3:
if isinstance(value, bytes):
value = force_unicode(value)
if isinstance(value, str):
is_string = True
else:
if isinstance(value, str):
value = force_unicode(value)
if isinstance(value, basestring):
is_string = True
if is_string == True:
possible_datetime = DATETIME_REGEX.search(value)
if possible_datetime:
date_values = possible_datetime.groupdict()
for dk, dv in date_values.items():
date_values[dk] = int(dv)
return datetime.datetime(date_values['year'], date_values['month'], date_values['day'], date_values['hour'], date_values['minute'], date_values['second'])
try:
# This is slightly gross but it's hard to tell otherwise what the
# string's original type might have been.
return ast.literal_eval(value)
except (ValueError, SyntaxError):
# If it fails, continue on.
pass
return value
def _is_null_value(self, value):
"""
Check if a given value is ``null``.
Criteria for this is based on values that shouldn't be included
in the Solr ``add`` request at all.
"""
if value is None:
return True
if IS_PY3:
# Python 3.X
if isinstance(value, str) and len(value) == 0:
return True
else:
# Python 2.X
if isinstance(value, basestring) and len(value) == 0:
return True
# TODO: This should probably be removed when solved in core Solr level?
return False
# API Methods ############################################################
def search(self, q, **kwargs):
"""
Performs a search and returns the results.
Requires a ``q`` for a string version of the query to run.
Optionally accepts ``**kwargs`` for additional options to be passed
through the Solr URL.
Usage::
# All docs.
results = solr.search('*:*')
# Search with highlighting.
results = solr.search('ponies', **{
'hl': 'true',
'hl.fragsize': 10,
})
"""
params = {'q': q}
params.update(kwargs)
response = self._select(params)
# TODO: make result retrieval lazy and allow custom result objects
result = self.decoder.decode(response)
result_kwargs = {}
if result.get('debug'):
result_kwargs['debug'] = result['debug']
if result.get('highlighting'):
result_kwargs['highlighting'] = result['highlighting']
if result.get('facet_counts'):
result_kwargs['facets'] = result['facet_counts']
if result.get('spellcheck'):
result_kwargs['spellcheck'] = result['spellcheck']
if result.get('stats'):
result_kwargs['stats'] = result['stats']
if 'QTime' in result.get('responseHeader', {}):
result_kwargs['qtime'] = result['responseHeader']['QTime']
if result.get('grouped'):
result_kwargs['grouped'] = result['grouped']
response = result.get('response') or {}
numFound = response.get('numFound', 0)
self.log.debug("Found '%s' search results.", numFound)
return Results(response.get('docs', ()), numFound, **result_kwargs)
def more_like_this(self, q, mltfl, **kwargs):
"""
Finds and returns results similar to the provided query.
Requires Solr 1.3+.
Usage::
similar = solr.more_like_this('id:doc_234', 'text')
"""
params = {
'q': q,
'mlt.fl': mltfl,
}
params.update(kwargs)
response = self._mlt(params)
result = self.decoder.decode(response)
if result['response'] is None:
result['response'] = {
'docs': [],
'numFound': 0,
}
self.log.debug("Found '%s' MLT results.", result['response']['numFound'])
return Results(result['response']['docs'], result['response']['numFound'])
def suggest_terms(self, fields, prefix, **kwargs):
"""
Accepts a list of field names and a prefix
Returns a dictionary keyed on field name containing a list of
``(term, count)`` pairs
Requires Solr 1.4+.
"""
params = {
'terms.fl': fields,
'terms.prefix': prefix,
}
params.update(kwargs)
response = self._suggest_terms(params)
result = self.decoder.decode(response)
terms = result.get("terms", {})
res = {}
# in Solr 1.x the value of terms is a flat list:
# ["field_name", ["dance",23,"dancers",10,"dancing",8,"dancer",6]]
#
# in Solr 3.x the value of terms is a dict:
# {"field_name": ["dance",23,"dancers",10,"dancing",8,"dancer",6]}
if isinstance(terms, (list, tuple)):
terms = dict(zip(terms[0::2], terms[1::2]))
for field, values in terms.items():
tmp = list()
while values:
tmp.append((values.pop(0), values.pop(0)))
res[field] = tmp
self.log.debug("Found '%d' Term suggestions results.", sum(len(j) for i, j in res.items()))
return res
def _build_doc(self, doc, boost=None):
doc_elem = ET.Element('doc')
for key, value in doc.items():
if key == 'boost':
doc_elem.set('boost', force_unicode(value))
continue
# To avoid multiple code-paths we'd like to treat all of our values as iterables:
if isinstance(value, (list, tuple)):
values = value
else:
values = (value, )
for bit in values:
if self._is_null_value(bit):
continue
attrs = {'name': key}
if boost and key in boost:
attrs['boost'] = force_unicode(boost[key])
field = ET.Element('field', **attrs)
field.text = self._from_python(bit)
doc_elem.append(field)
return doc_elem
def add(self, docs, commit=True, boost=None, commitWithin=None, waitFlush=None, waitSearcher=None):
"""
Adds or updates documents.
Requires ``docs``, which is a list of dictionaries. Each key is the
field name and each value is the value to index.
Optionally accepts ``commit``. Default is ``True``.
Optionally accepts ``boost``. Default is ``None``.
Optionally accepts ``commitWithin``. Default is ``None``.
Optionally accepts ``waitFlush``. Default is ``None``.
Optionally accepts ``waitSearcher``. Default is ``None``.
Usage::
solr.add([
{
"id": "doc_1",
"title": "A test document",
},
{
"id": "doc_2",
"title": "The Banana: Tasty or Dangerous?",
},
])
"""
start_time = time.time()
self.log.debug("Starting to build add request...")
message = ET.Element('add')
if commitWithin:
message.set('commitWithin', commitWithin)
for doc in docs:
message.append(self._build_doc(doc, boost=boost))
# This returns a bytestring. Ugh.
m = ET.tostring(message, encoding='utf-8')
# Convert back to Unicode please.
m = force_unicode(m)
end_time = time.time()
self.log.debug("Built add request of %s docs in %0.2f seconds.", len(message), end_time - start_time)
return self._update(m, commit=commit, waitFlush=waitFlush, waitSearcher=waitSearcher)
def delete(self, id=None, q=None, commit=True, waitFlush=None, waitSearcher=None):
"""
Deletes documents.
Requires *either* ``id`` or ``query``. ``id`` is if you know the
specific document id to remove. ``query`` is a Lucene-style query
indicating a collection of documents to delete.
Optionally accepts ``commit``. Default is ``True``.
Optionally accepts ``waitFlush``. Default is ``None``.
Optionally accepts ``waitSearcher``. Default is ``None``.
Usage::
solr.delete(id='doc_12')
solr.delete(q='*:*')
"""
if id is None and q is None:
raise ValueError('You must specify "id" or "q".')
elif id is not None and q is not None:
            raise ValueError('You may only specify "id" OR "q", not both.')
elif id is not None:
m = '<delete><id>%s</id></delete>' % id
elif q is not None:
m = '<delete><query>%s</query></delete>' % q
return self._update(m, commit=commit, waitFlush=waitFlush, waitSearcher=waitSearcher)
def commit(self, waitFlush=None, waitSearcher=None, expungeDeletes=None):
"""
Forces Solr to write the index data to disk.
Optionally accepts ``expungeDeletes``. Default is ``None``.
Optionally accepts ``waitFlush``. Default is ``None``.
Optionally accepts ``waitSearcher``. Default is ``None``.
Usage::
solr.commit()
"""
if expungeDeletes is not None:
msg = '<commit expungeDeletes="%s" />' % str(bool(expungeDeletes)).lower()
else:
msg = '<commit />'
return self._update(msg, waitFlush=waitFlush, waitSearcher=waitSearcher)
def optimize(self, waitFlush=None, waitSearcher=None, maxSegments=None):
"""
Tells Solr to streamline the number of segments used, essentially a
defragmentation operation.
Optionally accepts ``maxSegments``. Default is ``None``.
Optionally accepts ``waitFlush``. Default is ``None``.
Optionally accepts ``waitSearcher``. Default is ``None``.
Usage::
solr.optimize()
"""
if maxSegments:
msg = '<optimize maxSegments="%d" />' % maxSegments
else:
msg = '<optimize />'
return self._update(msg, waitFlush=waitFlush, waitSearcher=waitSearcher)
def extract(self, file_obj, extractOnly=True, **kwargs):
"""
POSTs a file to the Solr ExtractingRequestHandler so rich content can
be processed using Apache Tika. See the Solr wiki for details:
http://wiki.apache.org/solr/ExtractingRequestHandler
The ExtractingRequestHandler has a very simply model: it extracts
contents and metadata from the uploaded file and inserts it directly
into the index. This is rarely useful as it allows no way to store
additional data or otherwise customize the record. Instead, by default
we'll use the extract-only mode to extract the data without indexing it
so the caller has the opportunity to process it as appropriate; call
with ``extractOnly=False`` if you want to insert with no additional
processing.
Returns None if metadata cannot be extracted; otherwise returns a
dictionary containing at least two keys:
:contents:
Extracted full-text content, if applicable
:metadata:
key:value pairs of text strings
"""
if not hasattr(file_obj, "name"):
raise ValueError("extract() requires file-like objects which have a defined name property")
params = {
"extractOnly": "true" if extractOnly else "false",
"lowernames": "true",
"wt": "json",
}
params.update(kwargs)
try:
# We'll provide the file using its true name as Tika may use that
# as a file type hint:
resp = self._send_request('post', 'update/extract',
body=params,
files={'file': (file_obj.name, file_obj)})
except (IOError, SolrError) as err:
self.log.error("Failed to extract document metadata: %s", err,
exc_info=True)
raise
try:
data = json.loads(resp)
except ValueError as err:
self.log.error("Failed to load JSON response: %s", err,
exc_info=True)
raise
data['contents'] = data.pop(file_obj.name, None)
data['metadata'] = metadata = {}
raw_metadata = data.pop("%s_metadata" % file_obj.name, None)
if raw_metadata:
# The raw format is somewhat annoying: it's a flat list of
# alternating keys and value lists
while raw_metadata:
metadata[raw_metadata.pop()] = raw_metadata.pop()
return data
class SolrCoreAdmin(object):
"""
Handles core admin operations: see http://wiki.apache.org/solr/CoreAdmin
Operations offered by Solr are:
1. STATUS
2. CREATE
3. RELOAD
4. RENAME
5. ALIAS
6. SWAP
7. UNLOAD
8. LOAD (not currently implemented)
"""
def __init__(self, url, *args, **kwargs):
super(SolrCoreAdmin, self).__init__(*args, **kwargs)
self.url = url
def _get_url(self, url, params={}, headers={}):
resp = requests.get(url, params=params, headers=headers)
if resp.status_code != requests.codes.ok:
resp.raise_for_status()
return force_unicode(resp.content)
def status(self, core=None):
"""http://wiki.apache.org/solr/CoreAdmin#head-9be76f5a459882c5c093a7a1456e98bea7723953"""
params = {
'action': 'STATUS',
}
if core is not None:
params.update(core=core)
return self._get_url(self.url, params=params)
def create(self, name, instance_dir=None, config='solrconfig.xml', schema='schema.xml'):
"""http://wiki.apache.org/solr/CoreAdmin#head-7ca1b98a9df8b8ca0dcfbfc49940ed5ac98c4a08"""
params = {
'action': 'CREATE',
'name': name,
'config': config,
'schema': schema,
}
if instance_dir is None:
params.update(instanceDir=name)
else:
params.update(instanceDir=instance_dir)
return self._get_url(self.url, params=params)
def reload(self, core):
"""http://wiki.apache.org/solr/CoreAdmin#head-3f125034c6a64611779442539812067b8b430930"""
params = {
'action': 'RELOAD',
'core': core,
}
return self._get_url(self.url, params=params)
def rename(self, core, other):
"""http://wiki.apache.org/solr/CoreAdmin#head-9473bee1abed39e8583ba45ef993bebb468e3afe"""
params = {
'action': 'RENAME',
'core': core,
'other': other,
}
return self._get_url(self.url, params=params)
def swap(self, core, other):
"""http://wiki.apache.org/solr/CoreAdmin#head-928b872300f1b66748c85cebb12a59bb574e501b"""
params = {
'action': 'SWAP',
'core': core,
'other': other,
}
return self._get_url(self.url, params=params)
def unload(self, core):
"""http://wiki.apache.org/solr/CoreAdmin#head-f5055a885932e2c25096a8856de840b06764d143"""
params = {
'action': 'UNLOAD',
'core': core,
}
return self._get_url(self.url, params=params)
def load(self, core):
raise NotImplementedError('Solr 1.4 and below do not support this operation.')
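# Hedged usage sketch (added for clarity, not part of the original module): the
# admin handler URL and core names below are placeholder assumptions; the calls
# map one-to-one onto the CoreAdmin operations documented above.
def _example_swap_cores():
    admin = SolrCoreAdmin('http://localhost:8983/solr/admin/cores')
    admin.create('collection1_staging', instance_dir='collection1')
    return admin.swap('collection1', 'collection1_staging')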
# Using two-tuples to preserve order.
REPLACEMENTS = (
# Nuke nasty control characters.
    (b'\x00', b''), # Null character
(b'\x01', b''), # Start of heading
(b'\x02', b''), # Start of text
(b'\x03', b''), # End of text
(b'\x04', b''), # End of transmission
(b'\x05', b''), # Enquiry
(b'\x06', b''), # Acknowledge
(b'\x07', b''), # Ring terminal bell
(b'\x08', b''), # Backspace
(b'\x0b', b''), # Vertical tab
(b'\x0c', b''), # Form feed
(b'\x0e', b''), # Shift out
(b'\x0f', b''), # Shift in
(b'\x10', b''), # Data link escape
(b'\x11', b''), # Device control 1
(b'\x12', b''), # Device control 2
(b'\x13', b''), # Device control 3
(b'\x14', b''), # Device control 4
(b'\x15', b''), # Negative acknowledge
(b'\x16', b''), # Synchronous idle
(b'\x17', b''), # End of transmission block
(b'\x18', b''), # Cancel
(b'\x19', b''), # End of medium
(b'\x1a', b''), # Substitute character
(b'\x1b', b''), # Escape
(b'\x1c', b''), # File separator
(b'\x1d', b''), # Group separator
(b'\x1e', b''), # Record separator
(b'\x1f', b''), # Unit separator
)
def sanitize(data):
fixed_string = force_bytes(data)
for bad, good in REPLACEMENTS:
fixed_string = fixed_string.replace(bad, good)
return force_unicode(fixed_string)
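# Example (hypothetical input): control characters are stripped before text is sent to
# Solr, so sanitize(b'chunk one\x00chunk two\x1b') returns u'chunk onechunk two'.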
| 31.251894 | 170 | 0.56039 |
5276e555943cfbe2a5abc9fef47679f0415d5d72 | 20,509 | py | Python | src/sentry/migrations/0052_migrate_project_members.py | Casecommons/sentry | b69a2373a658c5c775671fe9985c3fa4f2eafcfd | [
"BSD-3-Clause"
] | null | null | null | src/sentry/migrations/0052_migrate_project_members.py | Casecommons/sentry | b69a2373a658c5c775671fe9985c3fa4f2eafcfd | [
"BSD-3-Clause"
] | null | null | null | src/sentry/migrations/0052_migrate_project_members.py | Casecommons/sentry | b69a2373a658c5c775671fe9985c3fa4f2eafcfd | [
"BSD-3-Clause"
] | null | null | null | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for project in orm['sentry.Project'].objects.all():
if not project.owner:
continue
if not project.team:
team = orm['sentry.Team'].objects.create(
owner=project.owner,
slug=project.slug,
name=project.name,
)
project.team = team
project.save()
else:
team = project.team
for member in project.member_set.all():
if not member.user:
continue
tm = orm['sentry.TeamMember'].objects.create(
team=team,
user=member.user,
)
key = orm['sentry.ProjectKey'].objects.get_or_create(
project=project,
user=tm.user,
)[0]
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 5, 3, 30, 7, 526220)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 5, 3, 30, 7, 526083)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_project_set'", 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
| 75.959259 | 182 | 0.550539 |
90b74a1dee5d5c5926f1deeea4ee6a3ccd96d768 | 365 | py | Python | CPFrontend/common/MachineConfig.py | Conpancol/PyHeroku | 16b157a23c77cd794d246a56cf8575766e48689c | [
"MIT"
] | null | null | null | CPFrontend/common/MachineConfig.py | Conpancol/PyHeroku | 16b157a23c77cd794d246a56cf8575766e48689c | [
"MIT"
] | null | null | null | CPFrontend/common/MachineConfig.py | Conpancol/PyHeroku | 16b157a23c77cd794d246a56cf8575766e48689c | [
"MIT"
] | null | null | null | import os
class MachineConfigurator:
"""configurations"""
def __init__(self):
self.backend = ""
self.local_backend = 'http://localhost:4567'
def getBackend(self):
envvar = os.getenv('BACKEND_URL')
print(envvar)
if envvar is None:
return self.local_backend
else:
return envvar
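# Usage sketch (illustrative, not part of the original file):
#
#     config = MachineConfigurator()
#     backend_url = config.getBackend()   # BACKEND_URL if set, else 'http://localhost:4567'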
| 19.210526 | 52 | 0.580822 |
67a352ce7f5efee2457a851c9663b9fc0de6d21b | 6,254 | py | Python | tools/pytest/testing/test_assertinterpret.py | servo-wpt-sync/web-platform-tests | 56e2df852354bc2b89e6d17a9dbafd280d24203c | [
"BSD-3-Clause"
] | 4 | 2020-09-09T15:28:01.000Z | 2021-12-01T00:59:56.000Z | tools/pytest/testing/test_assertinterpret.py | 063095/web-platform-tests | 255d54144a82ce76d8e50a4aa8de284151119f8b | [
"BSD-3-Clause"
] | 1 | 2021-03-31T20:23:55.000Z | 2021-03-31T20:23:55.000Z | tools/pytest/testing/test_assertinterpret.py | 063095/web-platform-tests | 255d54144a82ce76d8e50a4aa8de284151119f8b | [
"BSD-3-Clause"
] | 1 | 2020-03-31T17:20:54.000Z | 2020-03-31T17:20:54.000Z | "PYTEST_DONT_REWRITE"
import py
import pytest
from _pytest.assertion import util
def exvalue():
return py.std.sys.exc_info()[1]
def f():
return 2
def test_not_being_rewritten():
assert "@py_builtins" not in globals()
def test_assert():
try:
assert f() == 3
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_with_explicit_message():
try:
assert f() == 3, "hello"
except AssertionError:
e = exvalue()
assert e.msg == 'hello'
def test_assert_within_finally():
excinfo = pytest.raises(ZeroDivisionError, """
try:
1/0
finally:
i = 42
""")
s = excinfo.exconly()
assert py.std.re.search("division.+by zero", s) is not None
#def g():
# A.f()
#excinfo = getexcinfo(TypeError, g)
#msg = getmsg(excinfo)
#assert msg.find("must be called with A") != -1
def test_assert_multiline_1():
try:
assert (f() ==
3)
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_multiline_2():
try:
assert (f() == (4,
3)[-1])
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 ==')
def test_in():
try:
assert "hi" in [1, 2]
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 'hi' in")
def test_is():
try:
assert 1 is 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 is 2")
def test_attrib():
class Foo(object):
b = 1
i = Foo()
try:
assert i.b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
def test_attrib_inst():
class Foo(object):
b = 1
try:
assert Foo().b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
def test_len():
l = list(range(42))
try:
assert len(l) == 100
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 42 == 100")
assert "where 42 = len([" in s
def test_assert_non_string_message():
class A:
def __str__(self):
return "hello"
try:
assert 0 == 1, A()
except AssertionError:
e = exvalue()
assert e.msg == "hello"
def test_assert_keyword_arg():
def f(x=3):
return False
try:
assert f(x=5)
except AssertionError:
e = exvalue()
assert "x=5" in e.msg
def test_private_class_variable():
class X:
def __init__(self):
self.__v = 41
def m(self):
assert self.__v == 42
try:
X().m()
except AssertionError:
e = exvalue()
assert "== 42" in e.msg
# These tests should both fail, but should fail nicely...
class WeirdRepr:
def __repr__(self):
return '<WeirdRepr\nsecond line>'
def bug_test_assert_repr():
v = WeirdRepr()
try:
assert v == 1
except AssertionError:
e = exvalue()
assert e.msg.find('WeirdRepr') != -1
assert e.msg.find('second line') != -1
assert 0
def test_assert_non_string():
try:
assert 0, ['list']
except AssertionError:
e = exvalue()
assert e.msg.find("list") != -1
def test_assert_implicit_multiline():
try:
x = [1,2,3]
assert x != [1,
2, 3]
except AssertionError:
e = exvalue()
assert e.msg.find('assert [1, 2, 3] !=') != -1
def test_assert_with_brokenrepr_arg():
class BrokenRepr:
def __repr__(self): 0 / 0
e = AssertionError(BrokenRepr())
if e.msg.find("broken __repr__") == -1:
pytest.fail("broken __repr__ not handle correctly")
def test_multiple_statements_per_line():
try:
a = 1; assert a == 2
except AssertionError:
e = exvalue()
assert "assert 1 == 2" in e.msg
def test_power():
try:
assert 2**3 == 7
except AssertionError:
e = exvalue()
assert "assert (2 ** 3) == 7" in e.msg
def test_assert_customizable_reprcompare(monkeypatch):
monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
try:
assert 3 == 4
except AssertionError:
e = exvalue()
s = str(e)
assert "hello" in s
def test_assert_long_source_1():
try:
assert len == [
(None, ['somet text', 'more text']),
]
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_long_source_2():
try:
assert(len == [
(None, ['somet text', 'more text']),
])
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_raise_alias(testdir):
testdir.makepyfile("""
"PYTEST_DONT_REWRITE"
import sys
EX = AssertionError
def test_hello():
raise EX("hello"
"multi"
"line")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello*",
"*raise EX*",
"*1 failed*",
])
def test_assert_raise_subclass():
class SomeEx(AssertionError):
def __init__(self, *args):
super(SomeEx, self).__init__()
try:
raise SomeEx("hello")
except AssertionError:
s = str(exvalue())
assert 're-run' not in s
assert 'could not determine' in s
def test_assert_raises_in_nonzero_of_object_pytest_issue10():
class A(object):
def __nonzero__(self):
raise ValueError(42)
def __lt__(self, other):
return A()
def __repr__(self):
return "<MY42 object>"
def myany(x):
return True
try:
assert not(myany(A() < 0))
except AssertionError:
e = exvalue()
s = str(e)
assert "<MY42 object> < 0" in s
| 22.741818 | 68 | 0.543972 |
85c6ed30b0aa741bcbfe05ae7d2d67d7da75a1cc | 58 | py | Python | Codewars/7kyu/thinkful-string-drills-repeater/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codewars/7kyu/thinkful-string-drills-repeater/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codewars/7kyu/thinkful-string-drills-repeater/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python - 3.6.0
repeater = lambda string, n: string * n
| 14.5 | 39 | 0.637931 |
42c032e7557fb58d4774eb03e9fa17226601d8a1 | 1,654 | py | Python | averageActivity.py | choonlog/Global-Stabilization | 28e6cda176833ec911a009fbf6a09c39af8caea9 | [
"BSD-2-Clause"
] | 1 | 2020-03-11T16:43:00.000Z | 2020-03-11T16:43:00.000Z | averageActivity.py | choonlog/Global-Stabilization | 28e6cda176833ec911a009fbf6a09c39af8caea9 | [
"BSD-2-Clause"
] | null | null | null | averageActivity.py | choonlog/Global-Stabilization | 28e6cda176833ec911a009fbf6a09c39af8caea9 | [
"BSD-2-Clause"
] | null | null | null | import json
from pprint import pprint
import matplotlib.pyplot as plt
with open('sm.json') as data_file:
data = json.load(data_file)
attrKeyList = data["attractors"].keys()
eachActivity = []
dic = {}
for attrKey in attrKeyList:
if data["attractors"][attrKey]["type"] == "point":
pointRatio = data["attractors"][attrKey]["ratio"]
pointValue = data["attractors"][attrKey]["value"]
pointAttr = data["state_key"][pointValue]
for eachNode in pointAttr:
nodeRatio = float(eachNode) * float(pointRatio)
eachActivity.append(nodeRatio)
dic[pointValue] = eachActivity
else:
eachActivity = []
elif data["attractors"][attrKey]["type"] == "cyclic":
cyclicRatio = data["attractors"][attrKey]["ratio"]
cyclicValue = data["attractors"][attrKey]["value"]
cyclicLength = len(cyclicValue)
for eachValue in cyclicValue:
cyclicAttr = data["state_key"][eachValue]
for eachNode in cyclicAttr:
nodeRatio = float(eachNode) * float(cyclicRatio) * (1/cyclicLength)
eachActivity.append(nodeRatio)
dic[eachValue] = eachActivity
else:
eachActivity = []
averageActivity = []
for k in range(0, len(data["labels"])):
averageActivity.append(0)
for eachValue in data["state_key"]:
z = 0
for eachNode in dic[eachValue]:
averageActivity[z] = float(averageActivity[z]) + float(eachNode)
z = z + 1
else:
z = 0
for i in averageActivity:
print(i, end=" ")
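# Arithmetic of the weighting above (illustrative numbers, not from sm.json): a node that
# is ON (1) in a point attractor with ratio 0.4 contributes 1 * 0.4 = 0.4; in a 3-state
# cyclic attractor with ratio 0.3, each state it is ON in contributes 1 * 0.3 * (1/3) = 0.1.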
| 30.62963 | 84 | 0.588271 |
3bf20bc0c7990cbddc180e0cefd4ccd66dbb525e | 1,602 | py | Python | Src/Tests/interop/net/field/__init__.py | Enerccio/ironpython26-fixed | e302db14f05396a378adb438565a829e66acbf94 | [
"MS-PL"
] | 1 | 2020-02-11T06:02:40.000Z | 2020-02-11T06:02:40.000Z | Src/Languages/IronPython/Tests/interop/net/field/__init__.py | rudimk/dlr-dotnet | 71d11769f99d6ff1516ddbaed091a359eb46c670 | [
"MS-PL"
] | null | null | null | Src/Languages/IronPython/Tests/interop/net/field/__init__.py | rudimk/dlr-dotnet | 71d11769f99d6ff1516ddbaed091a359eb46c670 | [
"MS-PL"
] | 1 | 2018-11-21T04:10:23.000Z | 2018-11-21T04:10:23.000Z | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Microsoft Public License. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Microsoft Public License, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Microsoft Public License.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
'''
* Where the field is defined
- value type (enum), reference type,
- generic value/reference type (bound with value/reference type)
* Field type
- value type: built-in number types, enum, user defined struct
- reference type, interface?
- Nullable<T>
- array of something
- generic type parameter and its' constructed type
* Field modifier
- const (literal),
- readonly
- static / instance
* set/get via Type|object (dot) (Static|Intance) Field
* set value with something with different type, or, none
- convert succeed, or fail
* repeating from the derived class or its instance
* (python) __set__/__get__/__delete__/__str__/__repr__
* (python) Type.__dict__['Field'], and possible __set__/__get__, GetValue/SetValue
* Use field as by-ref arguments
* Other operations against field
- Augment: +=, <<=
- Continuous dot operator
- Call operator: ()
'''
| 39.073171 | 96 | 0.656679 |
afaacbd71ffce0f05197fb0a4d593b8420bdae63 | 2,187 | py | Python | cnns/data_utils/img_proc.py | johnwlambert/dlupi-heteroscedastic-dropou | 057dd079fce7ec8833b818b77fd694c01a1adcbc | [
"MIT"
] | 39 | 2018-04-04T13:29:03.000Z | 2022-03-12T23:57:33.000Z | cnns/data_utils/img_proc.py | johnwlambert/dlupi-heteroscedastic-dropou | 057dd079fce7ec8833b818b77fd694c01a1adcbc | [
"MIT"
] | 5 | 2018-04-30T12:14:38.000Z | 2021-04-26T23:52:18.000Z | cnns/data_utils/img_proc.py | johnwlambert/dlupi-heteroscedastic-dropou | 057dd079fce7ec8833b818b77fd694c01a1adcbc | [
"MIT"
] | 10 | 2018-05-14T09:14:55.000Z | 2021-11-10T00:23:21.000Z |
# John Lambert, Alan Luo, Ozan Sener
import torch
import cv2
import numpy as np
def normalize_tensor(tensor, mean, std):
"""
No pre - processing was applied to training images besides scaling to
the range of the tanh activation function[-1, 1].
"""
for i in range(tensor.size(0)):
img = tensor[i]
for t, m, s in zip(img, mean, std):
t.sub_(m).div_(s)
tensor[i] = img
return tensor
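# Usage sketch (illustrative values): with mean = std = (0.5, 0.5, 0.5), a batch already
# scaled to [0, 1] is mapped onto the tanh range [-1, 1]:
#
#     batch = torch.rand(8, 3, 224, 224)  # NCHW in [0, 1]
#     batch = normalize_tensor(batch, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))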
def convert_rgb2lab( images, batch_size): # [128, 3, 32, 32]
"""
INPUT: images should be NCHW
AB channel values are in the range [-128,128]
L channel values are in the range [0,100]
"""
images_np = images.numpy()
images_np_nhwc = np.rollaxis(images_np,1,4) # NCHW to NHWC
images_LAB = torch.FloatTensor( images.size() ).zero_() # empty NCHW array to hold LAB
for i in range( images_np_nhwc.shape[0] ):
img_lab = cv2.cvtColor(images_np_nhwc[i], cv2.COLOR_BGR2Lab ) # HWC
images_LAB[i] = torch.from_numpy( np.rollaxis( img_lab, 2, 0 ) ) # to CHW
images_L = images_LAB[:,0,:,:].contiguous().view(images.size(0), 1, images.size(2), images.size(3) ) # channel 0
images_AB = images_LAB[:,1:,:,:] # channels 1 and 2
return images_L, images_AB
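# Usage sketch (shapes only, illustrative):
#
#     images = torch.rand(16, 3, 32, 32)               # NCHW, float in [0, 1]
#     images_L, images_AB = convert_rgb2lab(images, batch_size=16)
#     # images_L.shape  -> (16, 1, 32, 32)
#     # images_AB.shape -> (16, 2, 32, 32)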
def create_im_masks_from_dets(use_bw_mask, ims, dets): # (images_t,labels_t):
"""
Accept PyTorch Tensor
TODO:
- Convert ops to PyTorch (and not NumPy)
- Process by batches
# TRY BOTH WAYS
# WHITE PIXELS IN BBOX REGIONS
# ACTUAL PIXEL VALUES IN BBOX REGIONS
"""
    # read in the ImageNet image; `ims` may be a path (read from disk) or an
    # already-loaded single HWC image array (batch support is still a TODO above)
    im = cv2.imread(ims) if isinstance(ims, str) else np.asarray(ims)
    im_h, im_w, im_c = im.shape
    # x, y, w, h coordinate detections; keep the original example boxes as a fallback
    if not dets:
        dets = [(100, 100, 200, 350),
                (350, 200, 100, 250)]
# for each detection, find the union of the areas
mask_im = np.zeros((im_h ,im_w ,im_c)).astype(np.uint8)
for det in dets:
x ,y ,w ,h = det
if use_bw_mask:
mask_im[y: y +h ,x: x +w ,:] = 255 # np.ones((h,w,im_c)).astype(np.uint8)
else:
mask_im[y: y +h ,x: x +w ,:] = im[y: y +h ,x: x +w ,:]
# return the image, with everything outside of the union as black
return mask_im | 33.136364 | 114 | 0.616369 |
7339f8c186250af524341307a69c247ac60f65eb | 2,617 | py | Python | bridge/jobs/templatetags/tableheader.py | kirillyat/klever | 259cc1345df91a47bc028f813613f063a8465429 | [
"Apache-2.0"
] | 16 | 2018-07-14T15:37:02.000Z | 2020-07-11T14:26:59.000Z | bridge/jobs/templatetags/tableheader.py | kirillyat/klever | 259cc1345df91a47bc028f813613f063a8465429 | [
"Apache-2.0"
] | 2 | 2019-10-28T22:32:31.000Z | 2020-04-24T09:24:20.000Z | bridge/jobs/templatetags/tableheader.py | kirillyat/klever | 259cc1345df91a47bc028f813613f063a8465429 | [
"Apache-2.0"
] | 10 | 2018-11-09T12:54:15.000Z | 2021-10-31T17:01:39.000Z | from django import template
register = template.Library()
class TableHeader(template.Node):
def __init__(self, columns, titles):
self._columns = columns
self._titles = titles
def render_th(self, colspan, rowspan, title):
return f'<th colspan="{colspan}" rowspan="{rowspan}">{title}</th>'
def render_items(self, columns, titles):
max_depth = 0
column_matrix = []
for column in columns:
column_list = column.split(':')
max_depth = max(max_depth, len(column_list))
column_prefix_list = []
for i in range(len(column_list)):
column_prefix_list.append(':'.join(column_list[:(i + 1)]))
column_matrix.append(column_prefix_list)
header_html = ''
for row in range(max_depth):
prev_cell = None
header_html += '<tr>'
for column in column_matrix:
if len(column) <= row:
continue
if prev_cell:
if column[row] == prev_cell['column']:
# Just stretch the previous column
prev_cell['columns'] += 1
continue
else:
header_html += self.render_th(prev_cell['columns'], prev_cell['rows'], prev_cell['title'])
prev_cell = {'column': column[row], 'rows': 1, 'columns': 1}
if column[row] in titles:
prev_cell['title'] = titles[column[row]]
else:
prev_cell['title'] = column[row].split(':')[-1]
if len(column) == row + 1:
# The last item in the list, need vertical stretch
prev_cell['rows'] = max_depth - len(column) + 1
if prev_cell:
header_html += self.render_th(prev_cell['columns'], prev_cell['rows'], prev_cell['title'])
header_html += '</tr>'
return header_html
def render(self, context):
columns = self._columns.resolve(context)
titles = self._titles.resolve(context) or {}
return self.render_items(columns, titles)
@register.tag
def tableheader(parser, token):
try:
tag_name, columns, titles = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError('%r tag requires exactly two arguments: '
'list of columns and its titles' % token.contents.split()[0])
return TableHeader(parser.compile_filter(columns), parser.compile_filter(titles))
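# Usage sketch (illustrative data, not from the original app): with
# columns = ['cpu:user', 'cpu:system', 'memory'] and titles = {'cpu': 'CPU'},
# {% tableheader columns titles %} renders two <tr> rows: the first holds
# <th colspan="2" rowspan="1">CPU</th> and <th colspan="1" rowspan="2">memory</th>,
# the second holds the 'user' and 'system' cells.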
| 37.927536 | 114 | 0.550248 |
fd30fe6dd05abef588152bb7d64bd29692e5a6b8 | 395 | py | Python | gibprosj/gibprosj/wsgi.py | Anderzz/gib2-prosjekt | 9e5796e09e151a2b24c94d875b947c334dffecbe | [
"MIT"
] | null | null | null | gibprosj/gibprosj/wsgi.py | Anderzz/gib2-prosjekt | 9e5796e09e151a2b24c94d875b947c334dffecbe | [
"MIT"
] | null | null | null | gibprosj/gibprosj/wsgi.py | Anderzz/gib2-prosjekt | 9e5796e09e151a2b24c94d875b947c334dffecbe | [
"MIT"
] | null | null | null | """
WSGI config for gibprosj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gibprosj.settings')
application = get_wsgi_application()
| 20.789474 | 78 | 0.782278 |
e2edae1b2fa99cbe6ba11f3516cc97f4ff6f1c2b | 1,149 | py | Python | weberp/apps/expenses/views.py | askar-alty/erp | cf5496fd7feed9d79705bbf5a034d1b13b96a98a | [
"MIT"
] | null | null | null | weberp/apps/expenses/views.py | askar-alty/erp | cf5496fd7feed9d79705bbf5a034d1b13b96a98a | [
"MIT"
] | null | null | null | weberp/apps/expenses/views.py | askar-alty/erp | cf5496fd7feed9d79705bbf5a034d1b13b96a98a | [
"MIT"
] | null | null | null | from django.views.generic.list import ListView
from . import models
class ExpenseListView(ListView):
model = models.Expense
template_name = 'expenses/expenses_list.html'
context_object_name = 'expenses'
def get_context_data(self, **kwargs):
context = super(ExpenseListView, self).get_context_data(**kwargs)
return context
def get_queryset(self):
expenses = models.Expense.objects.all()
expenses_list = []
for expense in expenses:
expenses_list.append({
'contractor': expense.contractor,
'item': expense.item,
'frequency_type': expense.frequency_type,
'products': [product for product in expense.products.all()],
'products_total_price': expense.products_total_price,
'products_number_in_pack': expense.products_number_in_pack,
'products_weight_in_pack': expense.products_weight_in_pack,
'products_count_in_pack': expense.products_count_in_pack,
'updated': expense.updated.isoformat()
})
return expenses_list | 35.90625 | 76 | 0.643168 |
33c9130e67b0faf948eb791c913bceabc00e84b4 | 4,423 | py | Python | Lib/site-packages/pyqtlineeditprogressbar/demo.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | Lib/site-packages/pyqtlineeditprogressbar/demo.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/pyqtlineeditprogressbar/demo.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | """
Demo source for PyQtLineEditProgressBar
"""
import time
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
import pyqtlineeditprogressbar as pqtpbar
from pyqtlineeditprogressbar import PyQtLineEditProgressBar
def find_color_name(color_value):
name = 'CUSTOM color'
for color_name in pqtpbar.EMBEDDED_COLORS:
if pqtpbar.EMBEDDED_COLORS[color_name] == color_value:
name = "DEFAULT {}".format(color_name)
return('with {} [{}]'.format(name, color_value))
default_color_value = pqtpbar.EMBEDDED_COLORS[pqtpbar.DEFAULT_COLOR_NAME]
class Dialog(QtWidgets.QDialog):
def __init__(self, parent=None):
QtWidgets.QDialog .__init__(self, parent)
mainLayout = QtWidgets.QVBoxLayout()
self.setWindowTitle("PyQtLineEditProgressBar Demo")
self.lineedit1 = PyQtLineEditProgressBar()
self.lineedit1.setAlignment(QtCore.Qt.AlignCenter)
#self.lineedit1.setSizePolicy()
self.lineedit1.setText(self.lineedit1.getBehavior() + " {}".format(find_color_name(default_color_value)) )
self.lineedit2 = PyQtLineEditProgressBar(behavior=pqtpbar.STARTS_EMPTY_FILLS_RIGHT_TO_LEFT,
progressbar_color=pqtpbar.DEFAULT_COLOR_RED)
self.lineedit2.setAlignment(QtCore.Qt.AlignCenter)
self.lineedit2.setText(self.lineedit2.getBehavior() + " {}".format(find_color_name(pqtpbar.DEFAULT_COLOR_RED)) )
self.lineedit3 = PyQtLineEditProgressBar(behavior=pqtpbar.STARTS_FULL_EMPTIES_LEFT_TO_RIGHT,
progressbar_color=pqtpbar.DEFAULT_COLOR_ORANGE)
self.lineedit3.setAlignment(QtCore.Qt.AlignCenter)
self.lineedit3.setText(self.lineedit3.getBehavior() + " {}".format(find_color_name(pqtpbar.DEFAULT_COLOR_ORANGE)) )
self.lineedit4 = PyQtLineEditProgressBar(behavior=pqtpbar.STARTS_FULL_EMPTIES_RIGHT_TO_LEFT,
progressbar_color=pqtpbar.DEFAULT_COLOR_BLUE)
self.lineedit4.setAlignment(QtCore.Qt.AlignCenter)
self.lineedit4.setText(self.lineedit4.getBehavior() + " {}".format(find_color_name(pqtpbar.DEFAULT_COLOR_BLUE)) )
self.lineedit5 = PyQtLineEditProgressBar(progressbar_color=pqtpbar.DEFAULT_COLOR_YELLOW)
self.lineedit5.setAlignment(QtCore.Qt.AlignCenter)
self.lineedit5.setText(self.lineedit5.getBehavior() + " {}".format(find_color_name(pqtpbar.DEFAULT_COLOR_YELLOW)) )
self.lineedit6 = PyQtLineEditProgressBar(behavior=pqtpbar.STARTS_EMPTY_FILLS_RIGHT_TO_LEFT,
progressbar_color=pqtpbar.DEFAULT_COLOR_PURPLE)
self.lineedit6.setAlignment(QtCore.Qt.AlignCenter)
self.lineedit6.setText(self.lineedit6.getBehavior() + " {}".format(find_color_name(pqtpbar.DEFAULT_COLOR_PURPLE)) )
self.lineedit7 = PyQtLineEditProgressBar(behavior=pqtpbar.STARTS_FULL_EMPTIES_RIGHT_TO_LEFT,
progressbar_color="#32a893")
self.lineedit7.setAlignment(QtCore.Qt.AlignCenter)
self.lineedit7.setText(self.lineedit7.getBehavior() + " {}".format(find_color_name("#32a893")) )
mainLayout.addWidget(self.lineedit1)
mainLayout.addWidget(self.lineedit2)
mainLayout.addWidget(self.lineedit3)
mainLayout.addWidget(self.lineedit4)
mainLayout.addWidget(self.lineedit5)
mainLayout.addWidget(self.lineedit6)
mainLayout.addWidget(self.lineedit7)
button = QtWidgets.QPushButton('Update Progress')
button.clicked.connect(self.buttonClicked)
mainLayout.addWidget(button)
self.setLayout(mainLayout)
def buttonClicked(self):
self.lineedit1.updateProgress(0.1)
#self.lineedit1.setText(self.lineedit1.getBehavior())
self.lineedit2.updateProgress(0.1)
#self.lineedit2.setText(self.lineedit2.getBehavior())
self.lineedit3.updateProgress(0.1)
#self.lineedit3.setText(self.lineedit3.getBehavior())
self.lineedit4.updateProgress(0.1)
#self.lineedit4.setText(self.lineedit4.getBehavior())
self.lineedit5.updateProgress(0.1)
#self.lineedit5.setText(self.lineedit5.getBehavior())
self.lineedit6.updateProgress(0.1)
#self.lineedit6.setText(self.lineedit6.getBehavior())
self.lineedit7.updateProgress(0.1)
time.sleep(1)
# ----------------------------------------------------------------------------
def main():
app = QtWidgets.QApplication([])
window = Dialog()
#window.resize(400, 50)
window.setFixedSize(350, 350)
window.show()
app.exec_()
# ----------------------------------------------------------------------------
# ----------------------------- main -----------------------------------------
# ----------------------------------------------------------------------------
if __name__ == "__main__":
main() | 38.798246 | 117 | 0.737508 |
6ec05231c6f8658db7db8de31f4c23279216bdc6 | 495 | py | Python | configs/selfsup/_base_/models/deepcluster/r18.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | 10 | 2021-12-30T10:22:27.000Z | 2022-03-30T02:31:38.000Z | configs/selfsup/_base_/models/deepcluster/r18.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | 3 | 2022-01-20T21:02:48.000Z | 2022-03-19T13:49:45.000Z | configs/selfsup/_base_/models/deepcluster/r18.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | null | null | null | # model settings
model = dict(
type='DeepCluster',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3,), # no conv-1, x-1: stage-x
norm_cfg=dict(type='BN'),
style='pytorch'),
neck=dict(type='AvgPoolNeck'),
head=dict(
type='ClsHead',
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
with_avg_pool=False, # already has avgpool in the neck
in_channels=512, num_classes=10000)
)
| 27.5 | 63 | 0.587879 |
3ced1b6a7c5cbf7e8b773c58f28072f575668652 | 2,259 | py | Python | deep_qa/layers/backend/envelope.py | richarajpal/deep_qa | d918335a1bed71b9cfccf1d5743321cee9c61952 | [
"Apache-2.0"
] | 459 | 2017-02-08T13:40:17.000Z | 2021-12-12T12:57:48.000Z | deep_qa/layers/backend/envelope.py | nelson-liu/deep_qa | 00d36306759cb1c232489f68844371fb727ce2c8 | [
"Apache-2.0"
] | 176 | 2017-01-26T01:19:41.000Z | 2018-04-22T19:16:01.000Z | deep_qa/layers/backend/envelope.py | nelson-liu/deep_qa | 00d36306759cb1c232489f68844371fb727ce2c8 | [
"Apache-2.0"
] | 154 | 2017-01-26T01:00:30.000Z | 2021-02-05T10:44:42.000Z | from overrides import overrides
from keras import backend as K
from ..masked_layer import MaskedLayer
class Envelope(MaskedLayer):
"""
Given a probability distribution over a begin index and an end index of some sequence, this
``Layer`` computes an envelope over the sequence, a probability that each element lies within
"begin" and "end".
Specifically, the computation done here is the following::
after_span_begin = K.cumsum(span_begin, axis=-1)
after_span_end = K.cumsum(span_end, axis=-1)
before_span_end = 1 - after_span_end
envelope = after_span_begin * before_span_end
Inputs:
- span_begin: tensor with shape ``(batch_size, sequence_length)``, representing a
probability distribution over a start index in the sequence
- span_end: tensor with shape ``(batch_size, sequence_length)``, representing a probability
distribution over an end index in the sequence
Outputs:
- envelope: tensor with shape ``(batch_size, sequence_length)``, representing a probability
for each index of the sequence belonging in the span
If there is a mask associated with either of the inputs, we ignore it, assuming that you used
the mask correctly when you computed your probability distributions. But we support masking in
this layer, so that you have an output mask if you really need it. We just return the first
mask that is not ``None`` (or ``None``, if both are ``None``).
"""
def __init__(self, **kwargs):
super(Envelope, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
span_begin_mask, span_end_mask = mask
return span_begin_mask if span_begin_mask is not None else span_end_mask
@overrides
def compute_output_shape(self, input_shape):
span_begin_shape, _ = input_shape
return span_begin_shape
@overrides
def call(self, inputs, mask=None):
span_begin, span_end = inputs
after_span_begin = K.cumsum(span_begin, axis=-1)
after_span_end = K.cumsum(span_end, axis=-1)
before_span_end = 1.0 - after_span_end
return after_span_begin * before_span_end
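# Worked example (illustrative one-hot inputs): for a single sequence with
# span_begin = [0, 1, 0, 0, 0] and span_end = [0, 0, 0, 1, 0], the cumulative sums are
# [0, 1, 1, 1, 1] and [0, 0, 0, 1, 1], so before_span_end = [1, 1, 1, 0, 0] and the
# returned envelope is [0, 1, 1, 0, 0].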
| 40.339286 | 99 | 0.702081 |
3ffe9be4a59b9d108e8d6ffc071ff132385b77b3 | 4,526 | py | Python | drf_chunked_upload/management/commands/delete_expired_uploads.py | daviddavis/drf-chunked-upload | b946677ca1b6e4017309049b8ad0bb5b200008c8 | [
"MIT-0"
] | 1 | 2021-07-04T10:49:42.000Z | 2021-07-04T10:49:42.000Z | drf_chunked_upload/management/commands/delete_expired_uploads.py | daviddavis/drf-chunked-upload | b946677ca1b6e4017309049b8ad0bb5b200008c8 | [
"MIT-0"
] | null | null | null | drf_chunked_upload/management/commands/delete_expired_uploads.py | daviddavis/drf-chunked-upload | b946677ca1b6e4017309049b8ad0bb5b200008c8 | [
"MIT-0"
] | 1 | 2021-07-04T10:49:44.000Z | 2021-07-04T10:49:44.000Z | from __future__ import print_function
from collections import Counter
from six import iteritems
import django.apps
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.utils.translation import ugettext as _
from drf_chunked_upload.settings import EXPIRATION_DELTA
from drf_chunked_upload.models import ChunkedUpload
PROMPT_MSG = _(u'Do you want to delete {obj}?')
VALID_RESP = {
"yes": True,
"y": True,
"ye": True,
"no": False,
"n": False
}
class Command(BaseCommand):
# Has to be a ChunkedUpload subclass
base_model = ChunkedUpload
help = 'Deletes chunked uploads that have already expired.'
def add_arguments(self, parser):
parser.add_argument(
'models',
metavar='app.model',
nargs='*',
help='Any app.model classes you want to clean up. Default is all ChunkedUpload subclasses within a project.',
)
parser.add_argument(
'-i',
'--interactive',
action='store_true',
dest='interactive',
default=False,
help='Prompt for confirmation before each deletion.',
)
parser.add_argument(
'-k',
'--keep-record',
action='store_false',
dest='delete_record',
default=True,
help="Don't delete upload records, just uploaded files on disk.",
)
def handle(self, *args, **options):
filter_models = options.get('models', None)
interactive = options.get('interactive')
delete_record = options.get('delete_record')
upload_models = self.get_models(filter_models=filter_models)
for model in upload_models:
self.process_model(model, interactive=interactive, delete_record=delete_record)
def _get_filter_model(self, model):
model_app, model_name = model.split('.')
try:
model_cls = django.apps.apps.get_app_config(model_app).get_model(model_name)
except LookupError as e:
print("WARNING: {}", e)
else:
if issubclass(model_cls, self.base_model):
return model_cls
print("WARNING: Model {} is not a subclass of ChunkedUpload and will be skipped.".format(model))
return None
def get_models(self, filter_models=None):
upload_models = []
if filter_models:
# the models were specified and
# we want to process only them
for model in filter_models:
model = self._get_filter_model(model)
if model:
upload_models.append(model)
else:
# no models were specified and we want
# to find all ChunkedUpload classes
upload_models = \
[m for m in django.apps.apps.get_models() if issubclass(m, self.base_model)]
return upload_models
def process_model(self, model, interactive=False, delete_record=True):
print('Processing uploads for model {}.{}...'.format(
model._meta.app_label,
model.__name__,
))
count = Counter({state[0]: 0 for state in model.STATUS_CHOICES})
chunked_uploads = model.objects.filter(
created_at__lt=(timezone.now() - EXPIRATION_DELTA)
)
if delete_record == False:
chunked_uploads = chunked_uploads.exclude(file__isnull=True)
for chunked_upload in chunked_uploads:
if interactive and not self.get_confirmation(chunked_upload):
continue
count[chunked_upload.status] += 1
# Deleting objects individually to call delete method explicitly
if delete_record:
chunked_upload.delete()
else:
chunked_upload.delete_file()
chunked_upload.save()
for state, number in iteritems(count):
print(
'{} {} upload{}s were deleted.'.format(
number,
dict(model.STATUS_CHOICES)[state].lower(),
(' file' if not delete_record else ''),
)
)
def get_confirmation(self, chunked_upload):
prompt = PROMPT_MSG.format(obj=chunked_upload) + u' (y/n): '
        while True:
answer = VALID_RESP.get(raw_input(prompt).lower(), None)
if answer is not None:
return answer
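# Example invocations (illustrative; the app.model name is a placeholder):
#
#     python manage.py delete_expired_uploads
#     python manage.py delete_expired_uploads myapp.MyChunkedUpload -i   # confirm each deletion
#     python manage.py delete_expired_uploads -k                         # delete files, keep records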
| 32.561151 | 121 | 0.595007 |
fd81b025aac35e387605c9fe472d2d86e1037ede | 21,559 | py | Python | tests/hwsim/run-tests.py | AstroProfundis/hostap | f4830bed661f4adff51f50a0d37c64ceb748e780 | [
"Unlicense"
] | null | null | null | tests/hwsim/run-tests.py | AstroProfundis/hostap | f4830bed661f4adff51f50a0d37c64ceb748e780 | [
"Unlicense"
] | null | null | null | tests/hwsim/run-tests.py | AstroProfundis/hostap | f4830bed661f4adff51f50a0d37c64ceb748e780 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python2
#
# Test case executor
# Copyright (c) 2013-2015, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import os
import re
import sys
import time
from datetime import datetime
import argparse
import subprocess
import termios
import logging
logger = logging.getLogger()
try:
import sqlite3
sqlite3_imported = True
except ImportError:
sqlite3_imported = False
scriptsdir = os.path.dirname(os.path.realpath(sys.modules[__name__].__file__))
sys.path.append(os.path.join(scriptsdir, '..', '..', 'wpaspy'))
from wpasupplicant import WpaSupplicant
from hostapd import HostapdGlobal
from check_kernel import check_kernel
from wlantest import Wlantest
from utils import HwsimSkip
def set_term_echo(fd, enabled):
[iflag, oflag, cflag, lflag, ispeed, ospeed, cc] = termios.tcgetattr(fd)
if enabled:
lflag |= termios.ECHO
else:
lflag &= ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW,
[iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
def reset_devs(dev, apdev):
ok = True
for d in dev:
try:
d.reset()
except Exception, e:
logger.info("Failed to reset device " + d.ifname)
print str(e)
ok = False
wpas = None
try:
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
ifaces = wpas.global_request("INTERFACES").splitlines()
for iface in ifaces:
if iface.startswith("wlan"):
wpas.interface_remove(iface)
except Exception, e:
pass
if wpas:
wpas.close_ctrl()
try:
hapd = HostapdGlobal()
hapd.flush()
hapd.remove('wlan3-3')
hapd.remove('wlan3-2')
for ap in apdev:
hapd.remove(ap['ifname'])
except Exception, e:
logger.info("Failed to remove hostapd interface")
print str(e)
ok = False
return ok
def add_log_file(conn, test, run, type, path):
if not os.path.exists(path):
return
contents = None
with open(path, 'r') as f:
contents = f.read()
if contents is None:
return
sql = "INSERT INTO logs(test,run,type,contents) VALUES(?, ?, ?, ?)"
params = (test, run, type, sqlite3.Binary(contents))
try:
conn.execute(sql, params)
conn.commit()
except Exception, e:
print "sqlite: " + str(e)
print "sql: %r" % (params, )
def report(conn, prefill, build, commit, run, test, result, duration, logdir,
sql_commit=True):
if conn:
if not build:
build = ''
if not commit:
commit = ''
if prefill:
conn.execute('DELETE FROM results WHERE test=? AND run=? AND result=?', (test, run, 'NOTRUN'))
sql = "INSERT INTO results(test,result,run,time,duration,build,commitid) VALUES(?, ?, ?, ?, ?, ?, ?)"
params = (test, result, run, time.time(), duration, build, commit)
try:
conn.execute(sql, params)
if sql_commit:
conn.commit()
except Exception, e:
print "sqlite: " + str(e)
print "sql: %r" % (params, )
if result == "FAIL":
for log in [ "log", "log0", "log1", "log2", "log3", "log5",
"hostapd", "dmesg", "hwsim0", "hwsim0.pcapng" ]:
add_log_file(conn, test, run, log,
logdir + "/" + test + "." + log)
class DataCollector(object):
def __init__(self, logdir, testname, tracing, dmesg):
self._logdir = logdir
self._testname = testname
self._tracing = tracing
self._dmesg = dmesg
def __enter__(self):
if self._tracing:
output = os.path.abspath(os.path.join(self._logdir, '%s.dat' % (self._testname, )))
self._trace_cmd = subprocess.Popen(['trace-cmd', 'record', '-o', output, '-e', 'mac80211', '-e', 'cfg80211', '-e', 'printk', 'sh', '-c', 'echo STARTED ; read l'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=open('/dev/null', 'w'),
cwd=self._logdir)
l = self._trace_cmd.stdout.read(7)
while self._trace_cmd.poll() is None and not 'STARTED' in l:
l += self._trace_cmd.stdout.read(1)
res = self._trace_cmd.returncode
if res:
print "Failed calling trace-cmd: returned exit status %d" % res
sys.exit(1)
def __exit__(self, type, value, traceback):
if self._tracing:
self._trace_cmd.stdin.write('DONE\n')
self._trace_cmd.wait()
if self._dmesg:
output = os.path.join(self._logdir, '%s.dmesg' % (self._testname, ))
num = 0
while os.path.exists(output):
output = os.path.join(self._logdir, '%s.dmesg-%d' % (self._testname, num))
num += 1
subprocess.call(['dmesg', '-c'], stdout=open(output, 'w'))
def rename_log(logdir, basename, testname, dev):
try:
import getpass
srcname = os.path.join(logdir, basename)
dstname = os.path.join(logdir, testname + '.' + basename)
num = 0
while os.path.exists(dstname):
dstname = os.path.join(logdir,
testname + '.' + basename + '-' + str(num))
num = num + 1
os.rename(srcname, dstname)
if dev:
dev.relog()
subprocess.call(['chown', '-f', getpass.getuser(), srcname])
except Exception, e:
logger.info("Failed to rename log files")
logger.info(e)
def main():
tests = []
test_modules = []
files = os.listdir(scriptsdir)
for t in files:
m = re.match(r'(test_.*)\.py$', t)
if m:
logger.debug("Import test cases from " + t)
mod = __import__(m.group(1))
test_modules.append(mod.__name__.replace('test_', '', 1))
for key,val in mod.__dict__.iteritems():
if key.startswith("test_"):
tests.append(val)
test_names = list(set([t.__name__.replace('test_', '', 1) for t in tests]))
run = None
parser = argparse.ArgumentParser(description='hwsim test runner')
parser.add_argument('--logdir', metavar='<directory>',
help='log output directory for all other options, ' +
'must be given if other log options are used')
group = parser.add_mutually_exclusive_group()
group.add_argument('-d', const=logging.DEBUG, action='store_const',
dest='loglevel', default=logging.INFO,
help="verbose debug output")
group.add_argument('-q', const=logging.WARNING, action='store_const',
dest='loglevel', help="be quiet")
parser.add_argument('-S', metavar='<sqlite3 db>', dest='database',
help='database to write results to')
parser.add_argument('--prefill-tests', action='store_true', dest='prefill',
help='prefill test database with NOTRUN before all tests')
parser.add_argument('--commit', metavar='<commit id>',
help='commit ID, only for database')
parser.add_argument('-b', metavar='<build>', dest='build', help='build ID')
parser.add_argument('-L', action='store_true', dest='update_tests_db',
help='List tests (and update descriptions in DB)')
parser.add_argument('-T', action='store_true', dest='tracing',
help='collect tracing per test case (in log directory)')
parser.add_argument('-D', action='store_true', dest='dmesg',
help='collect dmesg per test case (in log directory)')
parser.add_argument('--shuffle-tests', action='store_true',
dest='shuffle_tests',
help='Shuffle test cases to randomize order')
parser.add_argument('--split', help='split tests for parallel execution (<server number>/<total servers>)')
parser.add_argument('--no-reset', action='store_true', dest='no_reset',
help='Do not reset devices at the end of the test')
parser.add_argument('--long', action='store_true',
help='Include test cases that take long time')
parser.add_argument('-f', dest='testmodules', metavar='<test module>',
help='execute only tests from these test modules',
type=str, choices=[[]] + test_modules, nargs='+')
parser.add_argument('-l', metavar='<modules file>', dest='mfile',
help='test modules file name')
parser.add_argument('-i', action='store_true', dest='stdin_ctrl',
help='stdin-controlled test case execution')
parser.add_argument('tests', metavar='<test>', nargs='*', type=str,
help='tests to run (only valid without -f)',
choices=[[]] + test_names)
args = parser.parse_args()
if (args.tests and args.testmodules) or (args.tests and args.mfile) or (args.testmodules and args.mfile):
print 'Invalid arguments - only one of (test, test modules, modules file) can be given.'
sys.exit(2)
if args.database:
if not sqlite3_imported:
print "No sqlite3 module found"
sys.exit(2)
conn = sqlite3.connect(args.database)
conn.execute('CREATE TABLE IF NOT EXISTS results (test,result,run,time,duration,build,commitid)')
conn.execute('CREATE TABLE IF NOT EXISTS tests (test,description)')
conn.execute('CREATE TABLE IF NOT EXISTS logs (test,run,type,contents)')
else:
conn = None
if conn:
run = int(time.time())
# read the modules from the modules file
if args.mfile:
args.testmodules = []
with open(args.mfile) as f:
for line in f.readlines():
line = line.strip()
if not line or line.startswith('#'):
continue
args.testmodules.append(line)
tests_to_run = []
if args.tests:
for selected in args.tests:
for t in tests:
name = t.__name__.replace('test_', '', 1)
if name == selected:
tests_to_run.append(t)
else:
for t in tests:
name = t.__name__.replace('test_', '', 1)
if args.testmodules:
if not t.__module__.replace('test_', '', 1) in args.testmodules:
continue
tests_to_run.append(t)
if args.update_tests_db:
for t in tests_to_run:
name = t.__name__.replace('test_', '', 1)
if t.__doc__ is None:
print name + " - MISSING DESCRIPTION"
else:
print name + " - " + t.__doc__
if conn:
sql = 'INSERT OR REPLACE INTO tests(test,description) VALUES (?, ?)'
params = (name, t.__doc__)
try:
conn.execute(sql, params)
except Exception, e:
print "sqlite: " + str(e)
print "sql: %r" % (params,)
if conn:
conn.commit()
conn.close()
sys.exit(0)
if not args.logdir:
if os.path.exists('logs/current'):
args.logdir = 'logs/current'
else:
args.logdir = 'logs'
# Write debug level log to a file and configurable verbosity to stdout
logger.setLevel(logging.DEBUG)
stdout_handler = logging.StreamHandler()
stdout_handler.setLevel(args.loglevel)
logger.addHandler(stdout_handler)
file_name = os.path.join(args.logdir, 'run-tests.log')
log_handler = logging.FileHandler(file_name)
log_handler.setLevel(logging.DEBUG)
fmt = "%(asctime)s %(levelname)s %(message)s"
log_formatter = logging.Formatter(fmt)
log_handler.setFormatter(log_formatter)
logger.addHandler(log_handler)
dev0 = WpaSupplicant('wlan0', '/tmp/wpas-wlan0')
dev1 = WpaSupplicant('wlan1', '/tmp/wpas-wlan1')
dev2 = WpaSupplicant('wlan2', '/tmp/wpas-wlan2')
dev = [ dev0, dev1, dev2 ]
apdev = [ ]
apdev.append({"ifname": 'wlan3', "bssid": "02:00:00:00:03:00"})
apdev.append({"ifname": 'wlan4', "bssid": "02:00:00:00:04:00"})
for d in dev:
if not d.ping():
logger.info(d.ifname + ": No response from wpa_supplicant")
return
logger.info("DEV: " + d.ifname + ": " + d.p2p_dev_addr())
for ap in apdev:
logger.info("APDEV: " + ap['ifname'])
passed = []
skipped = []
failed = []
# make sure nothing is left over from previous runs
# (if there were any other manual runs or we crashed)
if not reset_devs(dev, apdev):
if conn:
conn.close()
conn = None
sys.exit(1)
if args.dmesg:
subprocess.call(['dmesg', '-c'], stdout=open('/dev/null', 'w'))
if conn and args.prefill:
for t in tests_to_run:
name = t.__name__.replace('test_', '', 1)
report(conn, False, args.build, args.commit, run, name, 'NOTRUN', 0,
args.logdir, sql_commit=False)
conn.commit()
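    # --split N/M: sort the test cases by name and keep every M-th one starting at offset
    # N-1, so M parallel servers together cover the whole suite without overlap.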
if args.split:
vals = args.split.split('/')
split_server = int(vals[0])
split_total = int(vals[1])
logger.info("Parallel execution - %d/%d" % (split_server, split_total))
split_server -= 1
tests_to_run.sort(key=lambda t: t.__name__)
tests_to_run = [x for i,x in enumerate(tests_to_run) if i % split_total == split_server]
if args.shuffle_tests:
from random import shuffle
shuffle(tests_to_run)
count = 0
if args.stdin_ctrl:
print "READY"
sys.stdout.flush()
num_tests = 0
else:
num_tests = len(tests_to_run)
if args.stdin_ctrl:
set_term_echo(sys.stdin.fileno(), False)
while True:
if args.stdin_ctrl:
test = sys.stdin.readline()
if not test:
break
test = test.splitlines()[0]
if test == '':
break
t = None
for tt in tests:
name = tt.__name__.replace('test_', '', 1)
if name == test:
t = tt
break
if not t:
print "NOT-FOUND"
sys.stdout.flush()
continue
else:
if len(tests_to_run) == 0:
break
t = tests_to_run.pop(0)
name = t.__name__.replace('test_', '', 1)
open('/dev/kmsg', 'w').write('running hwsim test case %s\n' % name)
if log_handler:
log_handler.stream.close()
logger.removeHandler(log_handler)
file_name = os.path.join(args.logdir, name + '.log')
log_handler = logging.FileHandler(file_name)
log_handler.setLevel(logging.DEBUG)
log_handler.setFormatter(log_formatter)
logger.addHandler(log_handler)
reset_ok = True
with DataCollector(args.logdir, name, args.tracing, args.dmesg):
count = count + 1
msg = "START {} {}/{}".format(name, count, num_tests)
logger.info(msg)
if args.loglevel == logging.WARNING:
print msg
sys.stdout.flush()
if t.__doc__:
logger.info("Test: " + t.__doc__)
start = datetime.now()
for d in dev:
try:
d.dump_monitor()
if not d.ping():
raise Exception("PING failed for {}".format(d.ifname))
if not d.global_ping():
raise Exception("Global PING failed for {}".format(d.ifname))
d.request("NOTE TEST-START " + name)
except Exception, e:
logger.info("Failed to issue TEST-START before " + name + " for " + d.ifname)
logger.info(e)
print "FAIL " + name + " - could not start test"
if conn:
conn.close()
conn = None
if args.stdin_ctrl:
set_term_echo(sys.stdin.fileno(), True)
sys.exit(1)
try:
if t.func_code.co_argcount > 2:
params = {}
params['logdir'] = args.logdir
params['long'] = args.long
t(dev, apdev, params)
elif t.func_code.co_argcount > 1:
t(dev, apdev)
else:
t(dev)
result = "PASS"
except HwsimSkip, e:
logger.info("Skip test case: %s" % e)
result = "SKIP"
except NameError, e:
import traceback
logger.info(e)
traceback.print_exc()
result = "FAIL"
except Exception, e:
import traceback
logger.info(e)
traceback.print_exc()
if args.loglevel == logging.WARNING:
print "Exception: " + str(e)
result = "FAIL"
for d in dev:
try:
d.dump_monitor()
d.request("NOTE TEST-STOP " + name)
except Exception, e:
logger.info("Failed to issue TEST-STOP after {} for {}".format(name, d.ifname))
logger.info(e)
result = "FAIL"
wpas = None
try:
wpas = WpaSupplicant(global_iface="/tmp/wpas-wlan5")
rename_log(args.logdir, 'log5', name, wpas)
if not args.no_reset:
wpas.remove_ifname()
except Exception, e:
pass
if wpas:
wpas.close_ctrl()
if args.no_reset:
print "Leaving devices in current state"
else:
reset_ok = reset_devs(dev, apdev)
for i in range(0, 3):
rename_log(args.logdir, 'log' + str(i), name, dev[i])
try:
hapd = HostapdGlobal()
except Exception, e:
print "Failed to connect to hostapd interface"
print str(e)
reset_ok = False
result = "FAIL"
hapd = None
rename_log(args.logdir, 'hostapd', name, hapd)
if hapd:
del hapd
hapd = None
wt = Wlantest()
rename_log(args.logdir, 'hwsim0.pcapng', name, wt)
rename_log(args.logdir, 'hwsim0', name, wt)
if os.path.exists(os.path.join(args.logdir, 'fst-wpa_supplicant')):
rename_log(args.logdir, 'fst-wpa_supplicant', name, None)
if os.path.exists(os.path.join(args.logdir, 'fst-hostapd')):
rename_log(args.logdir, 'fst-hostapd', name, None)
end = datetime.now()
diff = end - start
if result == 'PASS' and args.dmesg:
if not check_kernel(os.path.join(args.logdir, name + '.dmesg')):
logger.info("Kernel issue found in dmesg - mark test failed")
result = 'FAIL'
if result == 'PASS':
passed.append(name)
elif result == 'SKIP':
skipped.append(name)
else:
failed.append(name)
report(conn, args.prefill, args.build, args.commit, run, name, result,
diff.total_seconds(), args.logdir)
result = "{} {} {} {}".format(result, name, diff.total_seconds(), end)
logger.info(result)
if args.loglevel == logging.WARNING:
print result
sys.stdout.flush()
if not reset_ok:
print "Terminating early due to device reset failure"
break
if args.stdin_ctrl:
set_term_echo(sys.stdin.fileno(), True)
if log_handler:
log_handler.stream.close()
logger.removeHandler(log_handler)
file_name = os.path.join(args.logdir, 'run-tests.log')
log_handler = logging.FileHandler(file_name)
log_handler.setLevel(logging.DEBUG)
log_handler.setFormatter(log_formatter)
logger.addHandler(log_handler)
if conn:
conn.close()
if len(failed):
logger.info("passed {} test case(s)".format(len(passed)))
logger.info("skipped {} test case(s)".format(len(skipped)))
logger.info("failed tests: " + ' '.join(failed))
if args.loglevel == logging.WARNING:
print "failed tests: " + ' '.join(failed)
sys.exit(1)
logger.info("passed all {} test case(s)".format(len(passed)))
if len(skipped):
logger.info("skipped {} test case(s)".format(len(skipped)))
if args.loglevel == logging.WARNING:
print "passed all {} test case(s)".format(len(passed))
if len(skipped):
print "skipped {} test case(s)".format(len(skipped))
if __name__ == "__main__":
main()
| 37.690559 | 174 | 0.536249 |
c35108d24d724653b32511186642fab722de77de | 1,637 | py | Python | source/_static/solutions/lesson06/exc2_solution_linear_classifier_with_polar_coordinate_transformation.py | janEbert/deeplearning540.github.io | 4a13e12503b029e8abc06bb1f499f89399e4142a | [
"MIT"
] | 6 | 2021-03-01T07:00:32.000Z | 2022-03-23T15:33:53.000Z | source/_static/solutions/lesson06/exc2_solution_linear_classifier_with_polar_coordinate_transformation.py | janEbert/deeplearning540.github.io | 4a13e12503b029e8abc06bb1f499f89399e4142a | [
"MIT"
] | 17 | 2021-02-22T13:16:37.000Z | 2022-03-23T16:42:47.000Z | source/_static/solutions/lesson06/exc2_solution_linear_classifier_with_polar_coordinate_transformation.py | janEbert/deeplearning540.github.io | 4a13e12503b029e8abc06bb1f499f89399e4142a | [
"MIT"
] | 14 | 2021-02-22T11:38:28.000Z | 2022-01-12T15:08:48.000Z | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_wine
from sklearn.linear_model import RidgeClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
def plot_prediction(X, y, y_pred, *, title=''):
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
fig, (left, right) = plt.subplots(ncols=2, figsize=(14, 6))
fig.suptitle(title)
left.set(title='Ground truth labels')
left.scatter(*X.transpose(), c=y)
right.set(title='Predicted labels')
right.scatter(*X.transpose(), c=y_pred)
df = pd.read_csv('data.csv', index_col=0)
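# Polar-coordinate feature transform: 'radius' below is the squared radius x**2 + y**2
# (monotone in the true radius, so it separates concentric classes just as well for a
# linear model), and arctan2 is called as (x, y), i.e. the angle is measured from the
# y-axis rather than the x-axis.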
df = df.assign(
radius=df[['x', 'y']].pow(2).sum(axis=1),
phi=np.arctan2(df['x'], df['y']),
)
features = df[['radius', 'phi']]
target = df['target']
X_train, X_test, y_train, y_test = train_test_split(
features, target,
test_size=0.3,
random_state=0,
shuffle=True,
)
model = RidgeClassifier()
model.fit(X_train, y_train)
cm_train = confusion_matrix(y_train, model.predict(X_train))
print(
'Confusion matrix for training data:', cm_train,
f'Accuracy: {np.diag(cm_train).sum() / cm_train.sum()}',
sep='\n',
)
cm_test = confusion_matrix(y_test, model.predict(X_test))
print(
'Confusion matrix for test data:', cm_test,
f'Accuracy: {np.diag(cm_test).sum() / cm_test.sum()}',
sep='\n',
)
plot_prediction(X_train, y_train, model.predict(X_train), title='Training data')
plot_prediction(X_test, y_test, model.predict(X_test), title='Test data')
plt.show()
| 28.224138 | 80 | 0.701283 |
c659a16e8d2b18d6cbd00506fb956851ff5bebe5 | 908 | py | Python | other/inet_suite.py | sshyran/testoob | 729fa6a17660d0bd8c75907a89ed6998180b5765 | [
"Apache-2.0"
] | null | null | null | other/inet_suite.py | sshyran/testoob | 729fa6a17660d0bd8c75907a89ed6998180b5765 | [
"Apache-2.0"
] | null | null | null | other/inet_suite.py | sshyran/testoob | 729fa6a17660d0bd8c75907a89ed6998180b5765 | [
"Apache-2.0"
] | null | null | null | import unittest, urllib
class NewsSitesTestCase(unittest.TestCase):
def testSlashdot(self):
urllib.urlopen("http://www.slashdot.org").read()
def testWired(self):
urllib.urlopen("http://www.wired.com").read()
def testTheOnion(self):
urllib.urlopen("http://www.theonion.com").read()
class OtherSitesTestCase(unittest.TestCase):
def testYahoo(self):
urllib.urlopen("http://www.yahoo.com").read()
def testGoogle(self):
urllib.urlopen("http://www.google.com").read()
def testPython(self):
urllib.urlopen("http://www.python.org").read()
def testThinlet(self):
urllib.urlopen("http://thinlet.sourceforge.net").read()
def suite():
result = unittest.TestSuite()
result.addTest( unittest.makeSuite(NewsSitesTestCase) )
result.addTest( unittest.makeSuite(OtherSitesTestCase) )
return result
| 34.923077 | 64 | 0.659692 |
bcbb5ee2761d1213793ae56f221a4b6eb7a5e252 | 67 | py | Python | configure-repos/config.py | grobalex/github-scripts- | 891d17cf20645e39415723dd8f5e91a610ea2267 | [
"MIT"
] | null | null | null | configure-repos/config.py | grobalex/github-scripts- | 891d17cf20645e39415723dd8f5e91a610ea2267 | [
"MIT"
] | null | null | null | configure-repos/config.py | grobalex/github-scripts- | 891d17cf20645e39415723dd8f5e91a610ea2267 | [
"MIT"
] | null | null | null | # config.py:
githuburl = 'github.ccs.neu.edu'
githuborg = 'CS5500' | 16.75 | 32 | 0.701493 |
2394b3c750055942bd5d7d45e427cba0f773c70b | 25,899 | py | Python | Kernels/Research/FFT/FFT.py | WoodData/EndpointAI | 8e4d145ff45cf5559ab009eba4f423e944dc6975 | [
"Apache-2.0"
] | 190 | 2020-09-22T02:14:29.000Z | 2022-03-28T02:35:57.000Z | Kernels/Research/FFT/FFT.py | chuancqc/EndpointAI | ab67cefeae3c06f1c93f66812bcf988c14e72ff1 | [
"Apache-2.0"
] | 2 | 2021-08-30T10:06:22.000Z | 2021-11-05T20:37:58.000Z | Kernels/Research/FFT/FFT.py | chuancqc/EndpointAI | ab67cefeae3c06f1c93f66812bcf988c14e72ff1 | [
"Apache-2.0"
] | 80 | 2020-09-13T17:48:56.000Z | 2022-03-19T10:45:05.000Z | #
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
import numpy as np
import itertools
import scipy.fftpack
import scipy.fft
import argparse
import sys
parser = argparse.ArgumentParser(description='Pattern generation')
parser.add_argument('-f', nargs='?',type = str, default="", help="Path to folder containing Patterns folder")
parser.add_argument('-r', nargs='?',type = str, help="CMSIS Root")
# -r is pointing to Pattern generation folder
# CMSIS/DSP/Testing/PatternGeneration/
args = parser.parse_args()
if args.r:
sys.path.append(args.r)
else:
sys.path.append("PatternGeneration")
import Tools
# Those patterns are used for tests and benchmarks.
# For tests, there is the need to add tests for saturation
# Radix 2,3 and 5
# Primary blocks are 8,6,5,4,3,2
# We take the square and then all combinations
PRIMARY=[8,6,5,4,3,2]
def cartesian(*somelists):
r=[]
for element in itertools.product(*somelists):
r.append(list(element))
return(r)
SECOND = [x*y for (x,y) in cartesian(PRIMARY,PRIMARY)]
THIRD = [x*y for (x,y) in cartesian(SECOND,PRIMARY)]
FFTSIZES = PRIMARY + SECOND + THIRD
FFTSIZES = sorted(list(set(FFTSIZES))) + [4096]
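# FFTSIZES now holds every length that can be built from one, two or three of the primary
# radix stages (2..512), deduplicated and sorted, with 4096 appended for benchmarking.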
def iseven(a):
return(a%2==0)
# The real FFT size is only working on even length FFTs
REALFFTSIZES=list(filter(iseven,FFTSIZES))[1:-1] + [4096]
FFT2DSIZES=[(4,9),(8,4),(64,4)]
# [512, 384, 2, 3, 4, 5, 6, 256, 8, 9, 10, 128, 12, 15, 16, 144,
# 18, 20, 150, 24, 25, 27, 30, 32, 288, 160, 36, 40,
# 45, 48, 50, 180, 54, 60, 64, 320, 192, 200, 72, 75, 80,
# 216, 90, 96, 100, 108, 240, 120, 125]
#FFTSIZES=[4, 8, 6, 10, 15,16,32,64,9,27,81,5,25,125]
SINES=[0.25,0.5,0.9]
NOISES=[0.1,0.4]
FIXEDPOINTSCALING = [0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12]
def scaling(nb):
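    # FIXEDPOINTSCALING[nb-1] is the number of right shifts applied by the fixed-point
    # kernels for a length-nb transform; return the matching 1/2**k factor so the
    # reference outputs generated here line up with the downscaled q15/q31 results.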
return(1.0 / (1 << FIXEDPOINTSCALING[nb-1]))
#print(FFTSIZES)
#print(REALFFTSIZES)
#print(FFT2DSIZES)
#
#quit()
def asReal(a):
#return(a.view(dtype=np.float64))
return(a.reshape(np.size(a)).view(dtype=np.float64))
def noiseSignal(nb):
return(np.random.randn(nb))
def sineSignal(freqRatio,nb):
fc = nb / 2.0
f = freqRatio*fc
time = np.arange(0,nb)
return(np.sin(2 * np.pi * f * time/nb))
def noisySineSignal(noiseAmp,r,nb):
return(noiseAmp*noiseSignal(nb) + r*sineSignal(0.25,nb))
def stepSignal(r,nb):
na = int(nb/2)
nb = nb - na
return(np.concatenate((np.zeros(na), r*np.ones(nb))))
def writeFFTForSignal(config,mode,sig,i,nb,signame):
sig = sig / 4.0
fft=scipy.fftpack.fft(sig)
sigfft = sig
fftoutput = fft
if mode == Tools.Q15 or mode == Tools.Q31:
fftoutput = fftoutput * scaling(nb)
maxVal = np.max(np.abs(fft))
sigifft = fft / maxVal / 2.0
ifftoutput = sig / maxVal / 2.0
if mode == Tools.Q15 or mode == Tools.Q31:
ifftoutput = ifftoutput / 4.0
config.writeInput(i, asReal(sigfft),"ComplexInputFFTSamples_%s_%d_" % (signame,nb))
config.writeInput(i, asReal(fftoutput),"ComplexOutputFFTSamples_%s_%d_" % (signame,nb))
config.writeInput(i, asReal(sigifft),"ComplexInputIFFTSamples_%s_%d_" % (signame,nb))
config.writeInput(i, asReal(ifftoutput),"ComplexOutputIFFTSamples_%s_%d_" % (signame,nb))
def writeRFFTForSignal(config,mode,sig,i,nb,signame):
sig = sig / 4.0
rfft=scipy.fft.rfft(sig)
sigfft = sig
fftoutput = rfft
if mode == Tools.Q15 or mode == Tools.Q31:
fftoutput = fftoutput * scaling(int(nb/2))
maxVal = np.max(np.abs(rfft))
sigifft = rfft / maxVal / 2.0
ifftoutput = sig / maxVal / 2.0
if mode == Tools.Q15 or mode == Tools.Q31:
ifftoutput = ifftoutput / 4.0
config.writeInput(i, asReal(sigfft),"RealInputRFFTSamples_%s_%d_" % (signame,nb))
config.writeInput(i, asReal(fftoutput),"ComplexOutputRFFTSamples_%s_%d_" % (signame,nb))
config.writeInput(i, asReal(sigifft),"ComplexInputRIFFTSamples_%s_%d_" % (signame,nb))
config.writeInput(i, asReal(ifftoutput),"RealOutputRIFFTSamples_%s_%d_" % (signame,nb))
def writeCFFTTests(configs):
i = 1
# Write FFT tests for sinusoid
for nb in FFTSIZES:
sig = noisySineSignal(0.05,0.7,nb)
sigc = np.array([complex(x) for x in sig])
for config,mode in configs:
writeFFTForSignal(config,mode,sigc,i,nb,"Noisy")
i = i + 1
# Write FFT tests for step
for nb in FFTSIZES:
sig = stepSignal(0.5,nb)
sigc = np.array([complex(x) for x in sig])
for config,mode in configs:
writeFFTForSignal(config,mode,sigc,i,nb,"Step")
i = i + 1
#############################
# Used for benchmarks
#
## Add a new size for benchmark
BENCHSIZE = 4096
sig = noisySineSignal(0.05,0.7,BENCHSIZE)
sigc = np.array([complex(x) for x in sig])
for config,mode in configs:
writeFFTForSignal(config,mode,sigc,i,BENCHSIZE,"Noisy")
i = i + 1
return(i)
def asReal2D(a):
#return(a.view(dtype=np.float64))
return(a.reshape(np.size(a)).view(dtype=np.float64))
def writeFFT2DForSignal(config,mode,sig,i,rows,cols,signame):
fft=scipy.fft.fftn(sig)
sigfft = sig
fftoutput = fft
sigifft = sig
ifftoutput = scipy.fft.ifftn(sigifft)
if mode == Tools.Q15 or mode == Tools.Q31:
fftoutput = fftoutput * scaling(rows) * scaling(cols)
ifftoutput = ifftoutput / 4.0
config.writeInput(i, asReal2D(sigfft),"ComplexInputFFTSamples_%s_%d_%d_" % (signame,rows,cols))
config.writeInput(i, asReal2D(fftoutput),"ComplexOutputFFTSamples_%s_%d_%d_" % (signame,rows,cols))
config.writeInput(i, asReal2D(sigifft),"ComplexInputIFFTSamples_%s_%d_%d_" % (signame,rows,cols))
config.writeInput(i, asReal2D(ifftoutput),"ComplexOutputIFFTSamples_%s_%d_%d_" % (signame,rows,cols))
def writeCFFT2DTests(configs):
i = 1
# Write FFT2D tests for sinusoid
fr = 10
fc = 10
for (rows,cols) in FFT2DSIZES:
[X, Y] = np.meshgrid(2 * np.pi * np.arange(cols) * fr,
2 * np.pi * np.arange(rows) * fc)
sig = Tools.normalize(np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape))
sigc = np.array([[complex(y) for y in x] for x in sig])
for config,mode in configs:
writeFFT2DForSignal(config,mode,sigc,i,rows,cols,"Noisy")
i = i + 1
#############################
# Used for benchmarks
#
## Add a new size for benchmark
BENCHSIZE = 4096
sig = noisySineSignal(0.05,0.7,BENCHSIZE)
sigc = np.array([complex(x) for x in sig])
for config,mode in configs:
writeFFT2DForSignal(config,mode,sigc,i,64,64,"Noisy")
i = i + 1
return(i)
def writeRFFTTests(configs):
i = 1
# Write FFT tests for sinusoid
for nb in REALFFTSIZES:
sig = noisySineSignal(0.05,0.7,nb)
for config,mode in configs:
writeRFFTForSignal(config,mode,sig,i,nb,"Noisy")
i = i + 1
# Write FFT tests for step
for nb in REALFFTSIZES:
sig = stepSignal(0.5,nb)
for config,mode in configs:
writeRFFTForSignal(config,mode,sig,i,nb,"Step")
i = i + 1
#############################
# Used for benchmarks
#
## Add a new size for benchmark
BENCHSIZE = 4096
sig = noisySineSignal(0.05,0.7,BENCHSIZE)
for config,mode in configs:
writeRFFTForSignal(config,mode,sig,i,BENCHSIZE,"Noisy")
i = i + 1
return(i)
def generatePatterns():
PATTERNDIR = os.path.join(args.f,"Patterns","DSP","FFT","CFFT","CFFT")
PARAMDIR = os.path.join(args.f,"Parameters","DSP","FFT","CFFT","CFFT")
configf64=Tools.Config(PATTERNDIR,PARAMDIR,"f64")
configf32=Tools.Config(PATTERNDIR,PARAMDIR,"f32")
configf16=Tools.Config(PATTERNDIR,PARAMDIR,"f16")
configq31=Tools.Config(PATTERNDIR,PARAMDIR,"q31")
configq15=Tools.Config(PATTERNDIR,PARAMDIR,"q15")
print("CFFT")
allConfigs=[(configf64,Tools.F64),
(configf32,Tools.F32),
(configf16,Tools.F16),
(configq31,Tools.Q31),
(configq15,Tools.Q15)]
writeCFFTTests(allConfigs)
PATTERNDIR = os.path.join(args.f,"Patterns","DSP","FFT","CFFT2D","CFFT2D")
PARAMDIR = os.path.join(args.f,"Parameters","DSP","FFT","CFFT2D","CFFT2D")
configf64=Tools.Config(PATTERNDIR,PARAMDIR,"f64")
configf32=Tools.Config(PATTERNDIR,PARAMDIR,"f32")
configf16=Tools.Config(PATTERNDIR,PARAMDIR,"f16")
configq31=Tools.Config(PATTERNDIR,PARAMDIR,"q31")
configq15=Tools.Config(PATTERNDIR,PARAMDIR,"q15")
print("CFFT2D")
allConfigs=[(configf64,Tools.F64),
(configf32,Tools.F32),
(configf16,Tools.F16),
(configq31,Tools.Q31),
(configq15,Tools.Q15)]
writeCFFT2DTests(allConfigs)
PATTERNDIR = os.path.join(args.f,"Patterns","DSP","FFT","RFFT","RFFT")
PARAMDIR = os.path.join(args.f,"Parameters","DSP","FFT","RFFT","RFFT")
configf64=Tools.Config(PATTERNDIR,PARAMDIR,"f64")
configf32=Tools.Config(PATTERNDIR,PARAMDIR,"f32")
configf16=Tools.Config(PATTERNDIR,PARAMDIR,"f16")
configq31=Tools.Config(PATTERNDIR,PARAMDIR,"q31")
configq15=Tools.Config(PATTERNDIR,PARAMDIR,"q15")
print("RFFT")
allConfigs=[(configf64,Tools.F64),
(configf32,Tools.F32),
(configf16,Tools.F16),
(configq31,Tools.Q31),
(configq15,Tools.Q15)]
writeRFFTTests(allConfigs)
if __name__ == '__main__':
generatePatterns() | 74.852601 | 15,892 | 0.547241 |
194350c2fa9a9e23c8bda1e3a697fc9a3df6f491 | 1,973 | py | Python | nova/api/validation/extra_specs/traits.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 1,874 | 2015-01-04T05:18:34.000Z | 2022-03-31T03:30:28.000Z | nova/api/validation/extra_specs/traits.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 40 | 2015-04-13T02:32:42.000Z | 2022-02-16T02:28:06.000Z | nova/api/validation/extra_specs/traits.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 1,996 | 2015-01-04T15:11:51.000Z | 2022-03-31T11:03:13.000Z | # Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Validators for ``traits`` namespaced extra specs."""
import os_traits
from nova.api.validation.extra_specs import base
EXTRA_SPEC_VALIDATORS = []
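# One validator is generated per standard os-traits trait; a single pattern-based
# validator for CUSTOM_* traits is appended after the loop.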
for trait in os_traits.get_traits():
EXTRA_SPEC_VALIDATORS.append(
base.ExtraSpecValidator(
name=f'trait{{group}}:{trait}',
description=f'Require or forbid trait {trait}.',
value={
'type': str,
'enum': [
'required',
'forbidden',
],
},
parameters=[
{
'name': 'group',
'pattern': r'([a-zA-Z0-9_-]{1,64})?',
},
],
)
)
EXTRA_SPEC_VALIDATORS.append(
base.ExtraSpecValidator(
name='trait{group}:CUSTOM_{trait}',
description=(
'Require or forbid trait CUSTOM_{trait}.'
),
value={
'type': str,
'enum': [
'required',
'forbidden',
],
},
parameters=[
{
'name': 'group',
'pattern': r'([a-zA-Z0-9_-]{1,64})?',
},
{
'name': 'trait',
'pattern': r'[A-Z0-9_]+',
},
],
)
)
def register():
return EXTRA_SPEC_VALIDATORS
| 26.662162 | 75 | 0.519513 |
898e5bf5773e75b6d8e3a1ba553cbd4418406be7 | 1,323 | py | Python | src/rank.py | MyEyes/twitch-tsumego | ef7c477030af3a99dbfa35513321125b76879cb5 | [
"MIT"
] | null | null | null | src/rank.py | MyEyes/twitch-tsumego | ef7c477030af3a99dbfa35513321125b76879cb5 | [
"MIT"
] | null | null | null | src/rank.py | MyEyes/twitch-tsumego | ef7c477030af3a99dbfa35513321125b76879cb5 | [
"MIT"
] | null | null | null | import PySimpleGUI as sg
import threading
import time
W = 30 # name width
def open_window():
while True:
event, value = window.read()
if event == sg.WIN_CLOSED:
break
window.close()
def update(players, last):
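    # Refresh the window: rows 1-5 show the five highest scores in descending order and
    # row 0 shows the player who gave the most recent correct answer.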
i = 1
for (k,v) in sorted(players.items(), key=lambda x: -x[1]):
entry = f"{i}. {k.ljust(W-10)} {str(v).rjust(3)}"
window[i].update(entry)
i+=1
if i > 5:
break
if last:
window[0].update(f"{last} ({players[last]})".ljust(W))
window.refresh()
sg.theme('DarkPurple') # Add a touch of color
font = {'font': 'Courier 16', 'text_color': 'white'}
font2 = {'font': 'Sans-Serif 20', 'text_color': 'white'}
# All the stuff inside your window.
layout = [
[sg.Text(''.ljust(W+10), key=1, **font)],
[sg.Text(''.ljust(W+10), key=2, **font)],
[sg.Text(''.ljust(W+10), key=3, **font)],
[sg.Text(''.ljust(W+10), key=4, **font)],
[sg.Text(''.ljust(W+10), key=5, **font)],
[sg.Text(' ', **font)],
[sg.Text('Last correct answer:', **font2)],
[sg.Text(''.ljust(W+10), key=0, **font2)], ]
# Create the Window
window = sg.Window('Top Players', layout)
t = threading.Thread(target=open_window)
t.start()
time.sleep(1)
| 26.46 | 62 | 0.530612 |
427ac6f0c1e1936cbf29a742edd3843d21d6703e | 1,107 | py | Python | src/pylero/priority_option_id.py | yuxisun1217/pylero | 0210eb2243e02ab218f19a224e94eb63081f13e3 | [
"MIT"
] | null | null | null | src/pylero/priority_option_id.py | yuxisun1217/pylero | 0210eb2243e02ab218f19a224e94eb63081f13e3 | [
"MIT"
] | null | null | null | src/pylero/priority_option_id.py | yuxisun1217/pylero | 0210eb2243e02ab218f19a224e94eb63081f13e3 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from pylero.base_polarion import BasePolarion
class PriorityOptionId(BasePolarion):
"""Object to handle the Polarion WSDL tns5:PriorityOptionId class
Attributes:
id (string)
"""
_cls_suds_map = {"id": "id",
"uri": "_uri",
"_unresolved": "_unresolved"}
_id_field = "id"
_obj_client = "builder_client"
_obj_struct = "tns5:PriorityOptionId"
def __init__(self, id=None, uri=None,
suds_object=None):
"""PriorityOptionID Constructor
Args:
id: value of the priority
uri: the SubterraURI of the priority
suds_object: the Polarion Plan object
Returns:
None
References:
_WorkItem.priority
"""
super(self.__class__, self).__init__(id, suds_object)
class ArrayOfPriorityOptionId(BasePolarion):
_obj_client = "builder_client"
_obj_struct = "tns5:ArrayOfPriorityOptionId"
| 26.357143 | 69 | 0.634146 |
4b3d41aca1780fc944db1264a5e283cfb651234e | 1,753 | py | Python | join_annovar_vcf.py | sensecollective/bioinformatics_scripts | 3a23611f382b7f3dd60e5e2abe841b84408c0d44 | [
"BSD-3-Clause"
] | 7 | 2016-03-23T11:31:06.000Z | 2021-05-20T19:07:38.000Z | join_annovar_vcf.py | raonyguimaraes/bioinformatics_scripts | 3a23611f382b7f3dd60e5e2abe841b84408c0d44 | [
"BSD-3-Clause"
] | null | null | null | join_annovar_vcf.py | raonyguimaraes/bioinformatics_scripts | 3a23611f382b7f3dd60e5e2abe841b84408c0d44 | [
"BSD-3-Clause"
] | 8 | 2016-06-01T19:28:46.000Z | 2022-01-09T01:26:10.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from optparse import OptionParser
import os
from time import time
import datetime
import csv
__author__ = "Raony Guimarães"
__copyright__ = "Copyright 2011, The Exome Pipeline"
__credits__ = ["Raony Guimarães"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Raony Guimarães"
__email__ = "raonyguimaraes@gmail.com"
__status__ = "Production"
#run example
#python join_annovar_vcf.py -a <annovar_csv> -v <gatk_vcf>
parser = OptionParser()
parser.add_option("-a", dest="annovar_file",
help="CSV Generated by annovar", metavar="ANN")
parser.add_option("-v", dest="vcf_file",
help="VCF File Generated by GATK", metavar="VCF")
(options, args) = parser.parse_args()
annovar_file = csv.reader(open(options.annovar_file, "rb"))
header = annovar_file.next()
print header
#self.variants_array = array(list(annovar_file))
# Columns 10 and 11 of the ANNOVAR CSV hold the SIFT and PolyPhen scores; columns 15 and 16
# (chromosome, position) form the key used to join against the VCF.
variants = {}
for line in annovar_file:
variant = {}
variant['sift'] = line[10]
variant['polyphen'] = line[11]
variant_id = "%s-%s" % (line[15], line[16])
variants[variant_id] = variant
print len(variants)
vcf_file=open(options.vcf_file, 'r')
out_file=open(options.vcf_file+'.annovar.vcf', 'w')
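# Copy the VCF through unchanged, appending ANN_SIFT / ANN_POLYPHEN to the INFO column
# (field 8) of every variant whose chromosome-position key is present in the ANNOVAR table.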
for line in vcf_file:
if line.startswith('#'):
out_file.writelines(line)
else:
line = line.split('\t')
variant_id = "%s-%s" % (line[0], line[1])
if variant_id in variants:
if variants[variant_id]['sift'] != '':
line[7] = line[7]+';ANN_SIFT=%s' % (variants[variant_id]['sift'])
if variants[variant_id]['polyphen'] != '':
line[7] = line[7]+';ANN_POLYPHEN=%s' % (variants[variant_id]['polyphen'])
out_file.writelines("\t".join(line))
else:
out_file.writelines("\t".join(line))
| 26.164179 | 79 | 0.667998 |
b59881c23c09af14294421402be935cdcabae142 | 10,045 | py | Python | scrapy_do/schedule.py | umairwaheed/scrapy-do | 2425601ed5f18ef2e11449dc6ebb3c4c7169986a | [
"BSD-3-Clause"
] | null | null | null | scrapy_do/schedule.py | umairwaheed/scrapy-do | 2425601ed5f18ef2e11449dc6ebb3c4c7169986a | [
"BSD-3-Clause"
] | null | null | null | scrapy_do/schedule.py | umairwaheed/scrapy-do | 2425601ed5f18ef2e11449dc6ebb3c4c7169986a | [
"BSD-3-Clause"
] | null | null | null | #-------------------------------------------------------------------------------
# Author: Lukasz Janyst <lukasz@jany.st>
# Date: 02.12.2017
#
# Licensed under the 3-Clause BSD License, see the LICENSE file for details.
#-------------------------------------------------------------------------------
"""
Functionality related to the database of jobs.
"""
import dateutil.parser
import sqlite3
import uuid
from scrapy_do.utils import TimeStamper
from datetime import datetime
from enum import Enum
#-------------------------------------------------------------------------------
class Status(Enum):
"""
Status of the job.
"""
SCHEDULED = 1
PENDING = 2
RUNNING = 3
CANCELED = 4
SUCCESSFUL = 5
FAILED = 6
#-------------------------------------------------------------------------------
class Actor(Enum):
"""
An entity responsible for job creation.
"""
SCHEDULER = 1
USER = 2
#-------------------------------------------------------------------------------
class Job:
"""
A bin for all the parameters of a job.
"""
status = TimeStamper('_status')
actor = TimeStamper('_actor')
schedule = TimeStamper('_schedule')
project = TimeStamper('_project')
spider = TimeStamper('_spider')
duration = TimeStamper('_duration')
#---------------------------------------------------------------------------
def __init__(self, status=None, actor=None, schedule=None,
project=None, spider=None, timestamp=None, duration=None):
self.identifier = str(uuid.uuid4())
self._status = status
self._actor = actor
self._schedule = schedule
self._project = project
self._spider = spider
self.timestamp = timestamp or datetime.now()
self._duration = duration
#---------------------------------------------------------------------------
def __str__(self):
s = 'Job[id="{}", actor="{}", schedule="{}", project="{}", spider="{}"]'
s = s.format(self.identifier, self.actor.name, self.schedule,
self.project, self.spider)
return s
#---------------------------------------------------------------------------
def to_dict(self):
"""
Return all the parameters of the job as a dictionary
"""
d = {
'identifier': self.identifier,
'status': self.status.name,
'actor': self.actor.name,
'schedule': self.schedule,
'project': self.project,
'spider': self.spider,
'timestamp': str(self.timestamp),
'duration': self.duration
}
return d
#-------------------------------------------------------------------------------
def _record_to_job(x):
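    # x is a result row in table column order: (identifier, status, actor, schedule,
    # project, spider, timestamp, duration).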
job = Job(status=Status(x[1]), actor=Actor(x[2]), schedule=x[3],
project=x[4], spider=x[5], timestamp=dateutil.parser.parse(x[6]),
duration=x[7])
job.identifier = x[0]
return job
#-------------------------------------------------------------------------------
class Schedule:
"""
A persistent database of jobs.
:param database: A file name where the database will be stored
:param table: Name of the table containing the schedule
"""
#---------------------------------------------------------------------------
def __init__(self, database=None, table='schedule'):
#-----------------------------------------------------------------------
# Create the database and the main table
#-----------------------------------------------------------------------
self.database = database or ':memory:'
self.table = table
self.db = sqlite3.connect(self.database,
detect_types=sqlite3.PARSE_DECLTYPES)
query = "CREATE TABLE IF NOT EXISTS {table} (" \
"identifier VARCHAR(36) PRIMARY KEY, " \
"status INTEGER NOT NULL, " \
"actor INTEGER NOT NULL, " \
"schedule VARCHAR(255), " \
"project VARCHAR(255) NOT NULL, " \
"spider VARCHAR(255) NOT NULL, " \
"timestamp DATETIME NOT NULL, " \
"duration INTEGER" \
")"
query = query.format(table=self.table)
self.db.execute(query)
#-----------------------------------------------------------------------
# Create the metadata table
#-----------------------------------------------------------------------
query = 'CREATE TABLE IF NOT EXISTS schedule_metadata (' \
'key VARCHAR(255) PRIMARY KEY ON CONFLICT IGNORE, ' \
'value VARCHAR(255) NOT NULL ' \
')'
self.db.execute(query)
query = 'INSERT INTO schedule_metadata ' \
'(key, value) values ("version", "1") '
self.db.execute(query)
self.db.commit()
#---------------------------------------------------------------------------
def get_metadata(self, key):
"""
        Retrieve the metadata entry with a given key
"""
query = "SELECT * FROM schedule_metadata WHERE key=?"
response = self.db.execute(query, (key, ))
response = dict(response)
return response[key]
#---------------------------------------------------------------------------
def get_jobs(self, job_status):
"""
Retrieve a list of jobs with a given status
:param job_status: One of :class:`statuses <Status>`
"""
query = "SELECT * FROM {table} WHERE status=? ORDER BY timestamp DESC"
query = query.format(table=self.table)
response = self.db.execute(query, (job_status.value, ))
return [_record_to_job(rec) for rec in response]
#---------------------------------------------------------------------------
def get_active_jobs(self):
"""
Retrieve all the active jobs. Ie. all the jobs whose status is one of
the following: :data:`SCHEDULED <Status.SCHEDULED>`,
:data:`PENDING <Status.PENDING>`, or :data:`RUNNING <Status.RUNNING>`.
"""
query = "SELECT * FROM {table} WHERE " \
"status=1 OR status=2 OR status=3 "\
"ORDER BY timestamp DESC"
query = query.format(table=self.table)
response = self.db.execute(query)
return [_record_to_job(rec) for rec in response]
#---------------------------------------------------------------------------
def get_completed_jobs(self):
"""
Retrieve all the completed jobs. Ie. all the jobs whose status is one of
the following: :data:`SUCCESSFUL <Status.SUCCESSFUL>`,
:data:`FAILED <Status.FAILED>`, or :data:`CANCELED <Status.CANCELED>`.
"""
query = "SELECT * FROM {table} WHERE " \
"status=4 OR status=5 OR status=6 "\
"ORDER BY timestamp DESC"
query = query.format(table=self.table)
response = self.db.execute(query)
return [_record_to_job(rec) for rec in response]
#---------------------------------------------------------------------------
def get_scheduled_jobs(self, project):
"""
Retrieve all the scheduled jobs for the given project.
"""
query = "SELECT * FROM {table} WHERE " \
"status=1 AND project=?" \
"ORDER BY timestamp DESC"
query = query.format(table=self.table)
response = self.db.execute(query, (project, ))
return [_record_to_job(rec) for rec in response]
#---------------------------------------------------------------------------
def get_job(self, identifier):
"""
Retrieve a job by id
:param identifier: A string identifier of the job
"""
query = "SELECT * FROM {table} WHERE identifier=?"
query = query.format(table=self.table)
response = self.db.execute(query, (identifier, ))
rec = response.fetchone()
if rec is None:
raise ValueError('No such job: "{}"'.format(identifier))
return _record_to_job(rec)
#---------------------------------------------------------------------------
def add_job(self, job):
"""
Add a job to the database
:param job: A :class:`Job <Job>` object
"""
query = "INSERT INTO {table}" \
"(identifier, status, actor, schedule, project, spider, " \
"timestamp, duration) " \
"values (?, ?, ?, ?, ?, ?, ?, ?)"
query = query.format(table=self.table)
self.db.execute(query, (job.identifier, job.status.value,
job.actor.value, job.schedule, job.project,
job.spider, job.timestamp, job.duration))
self.db.commit()
#---------------------------------------------------------------------------
def commit_job(self, job):
"""
Modify an existing job
:param job: A :class:`Job <Job>` object
"""
query = "REPLACE INTO {table}" \
"(identifier, status, actor, schedule, project, spider, " \
"timestamp, duration) " \
"values (?, ?, ?, ?, ?, ?, ?, ?)"
query = query.format(table=self.table)
self.db.execute(query, (job.identifier, job.status.value,
job.actor.value, job.schedule, job.project,
job.spider, job.timestamp, job.duration))
self.db.commit()
#---------------------------------------------------------------------------
def remove_job(self, job_id):
"""
Remove a job from the database
        :param job_id: A string identifier of the job
"""
query = "DELETE FROM {table} WHERE identifier=?"
query = query.format(table=self.table)
self.db.execute(query, (job_id,))
self.db.commit()
| 37.203704 | 80 | 0.452364 |
6446bc7cca8344fd1a4eca62cf19c51496e48aca | 1,691 | py | Python | GenerativeModels/BGAN/generator.py | rexwangcc/gengine | 25a49e19aca8527fec9c661b80e6d5598ac30ed1 | [
"MIT"
] | null | null | null | GenerativeModels/BGAN/generator.py | rexwangcc/gengine | 25a49e19aca8527fec9c661b80e6d5598ac30ed1 | [
"MIT"
] | null | null | null | GenerativeModels/BGAN/generator.py | rexwangcc/gengine | 25a49e19aca8527fec9c661b80e6d5598ac30ed1 | [
"MIT"
] | null | null | null | from __future__ import print_function
import torch.nn as nn
class Generator(nn.Module):
def __init__(self, ngpu, nz, ngf, nc):
"""The model definition of the generator. This class refers to the PyTorch DCGAN TUTORIAL:
https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html
Args:
ngpu (int): The number of GPUs available. If this is 0, code will run in CPU mode. If this number is
greater than 0 it will run on that number of GPUs.
nz (int): The size of the latent z vector.
            ngf (int): The depth of feature maps propagated through the generator.
nc (int): The number of the channels of the input images.
"""
super().__init__()
self.ngpu = ngpu
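        # Four transposed-convolution stages upsample the 1x1 latent vector to a 32x32,
        # nc-channel image (spatial size 1 -> 4 -> 8 -> 16 -> 32).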
self.main = nn.Sequential(
nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 2, nc, 4, 2, 1, bias=False),
nn.Tanh()
)
def forward(self, input):
# Use parallel computing across GPUs if the hardware
# had multiple available GPUs
if input.is_cuda and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
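if __name__ == "__main__":
    # Minimal smoke test (illustrative only; the nz/ngf/nc values below are
    # assumptions for the example, not values from any training config).
    import torch
    net_g = Generator(ngpu=0, nz=100, ngf=64, nc=3)
    noise = torch.randn(2, 100, 1, 1)
    fake = net_g(noise)
    print(fake.shape)  # expected: torch.Size([2, 3, 32, 32])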
| 35.229167 | 112 | 0.564755 |
85283afcf23ff587e6466a66537f6697024e3bee | 33,980 | py | Python | tensorflow_tts/models/fastspeech.py | speechcatch/TensorFlowTTS | 2f52a315a8c99ff018e9de1898937627040d557e | [
"Apache-2.0"
] | null | null | null | tensorflow_tts/models/fastspeech.py | speechcatch/TensorFlowTTS | 2f52a315a8c99ff018e9de1898937627040d557e | [
"Apache-2.0"
] | null | null | null | tensorflow_tts/models/fastspeech.py | speechcatch/TensorFlowTTS | 2f52a315a8c99ff018e9de1898937627040d557e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 The FastSpeech Authors, The HuggingFace Inc. team and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Model modules for FastSpeech."""
import numpy as np
import tensorflow as tf
from tensorflow_tts.models import BaseModel
def get_initializer(initializer_range=0.02):
"""Creates a `tf.initializers.truncated_normal` with the given range.
Args:
initializer_range: float, initializer range for stddev.
Returns:
TruncatedNormal initializer with stddev = `initializer_range`.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
def gelu(x):
"""Gaussian Error Linear unit."""
cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
return x * cdf
def gelu_new(x):
"""Smoother gaussian Error Linear Unit."""
cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def swish(x):
"""Swish activation function."""
return tf.nn.swish(x)
def mish(x):
return x * tf.math.tanh(tf.math.softplus(x))
ACT2FN = {
"identity": tf.keras.layers.Activation("linear"),
"tanh": tf.keras.layers.Activation("tanh"),
"gelu": tf.keras.layers.Activation(gelu),
"relu": tf.keras.activations.relu,
"swish": tf.keras.layers.Activation(swish),
"gelu_new": tf.keras.layers.Activation(gelu_new),
"mish": tf.keras.layers.Activation(mish),
}
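# For reference: gelu(x) = x * Phi(x) with Phi the standard normal CDF, so
# gelu(0.0) == 0.0 and gelu(x) approaches x for large positive x; gelu_new is
# the tanh approximation of the same function, and mish(x) = x * tanh(softplus(x)).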
class TFEmbedding(tf.keras.layers.Embedding):
"""Faster version of embedding."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def call(self, inputs):
inputs = tf.cast(inputs, tf.int32)
outputs = tf.gather(self.embeddings, inputs)
return outputs
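# Usage sketch for TFEmbedding (illustrative shapes only, not used by the model):
#   emb = TFEmbedding(input_dim=10, output_dim=4)
#   out = emb(tf.constant([[1, 2, 3]]))  # tf.gather lookup -> shape (1, 3, 4)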
class TFFastSpeechEmbeddings(tf.keras.layers.Layer):
"""Construct charactor/phoneme/positional/speaker embeddings."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.hidden_size = config.encoder_self_attention_params.hidden_size
self.initializer_range = config.initializer_range
self.config = config
self.position_embeddings = TFEmbedding(
config.max_position_embeddings + 1,
self.hidden_size,
weights=[
self._sincos_embedding(
self.hidden_size, self.config.max_position_embeddings
)
],
name="position_embeddings",
trainable=False,
)
if config.n_speakers > 1:
self.encoder_speaker_embeddings = TFEmbedding(
config.n_speakers,
self.hidden_size,
embeddings_initializer=get_initializer(self.initializer_range),
name="speaker_embeddings",
)
self.speaker_fc = tf.keras.layers.Dense(
units=self.hidden_size, name="speaker_fc"
)
def build(self, input_shape):
"""Build shared charactor/phoneme embedding layers."""
with tf.name_scope("charactor_embeddings"):
self.charactor_embeddings = self.add_weight(
"weight",
shape=[self.vocab_size, self.hidden_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
def call(self, inputs, training=False):
"""Get charactor embeddings of inputs.
Args:
1. charactor, Tensor (int32) shape [batch_size, length].
2. speaker_id, Tensor (int32) shape [batch_size]
Returns:
Tensor (float32) shape [batch_size, length, embedding_size].
"""
return self._embedding(inputs, training=training)
def _embedding(self, inputs, training=False):
"""Applies embedding based on inputs tensor."""
input_ids, speaker_ids = inputs
input_shape = tf.shape(input_ids)
seq_length = input_shape[1]
position_ids = tf.range(1, seq_length + 1, dtype=tf.int32)[tf.newaxis, :]
# create embeddings
inputs_embeds = tf.gather(self.charactor_embeddings, input_ids)
position_embeddings = self.position_embeddings(position_ids)
# sum embedding
embeddings = inputs_embeds + tf.cast(position_embeddings, inputs_embeds.dtype)
if self.config.n_speakers > 1:
speaker_embeddings = self.encoder_speaker_embeddings(speaker_ids)
speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
# extended speaker embeddings
extended_speaker_features = speaker_features[:, tf.newaxis, :]
embeddings += extended_speaker_features
return embeddings
def _sincos_embedding(
self, hidden_size, max_positional_embedding,
):
position_enc = np.array(
[
[
pos / np.power(10000, 2.0 * (i // 2) / hidden_size)
for i in range(hidden_size)
]
for pos in range(max_positional_embedding + 1)
]
)
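        # Transformer sinusoidal positional encoding:
        #   PE(pos, 2i)   = sin(pos / 10000^(2i / hidden_size))
        #   PE(pos, 2i+1) = cos(pos / 10000^(2i / hidden_size))
        # with row 0 zeroed out below so padded positions contribute nothing.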
position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])
position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])
# pad embedding.
position_enc[0] = 0.0
return position_enc
def resize_positional_embeddings(self, new_size):
self.position_embeddings = TFEmbedding(
new_size + 1,
self.hidden_size,
weights=[self._sincos_embedding(self.hidden_size, new_size)],
name="position_embeddings",
trainable=False,
)
class TFFastSpeechSelfAttention(tf.keras.layers.Layer):
"""Self attention module for fastspeech."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.all_head_size = self.num_attention_heads * config.attention_head_size
self.query = tf.keras.layers.Dense(
self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="query",
)
self.key = tf.keras.layers.Dense(
self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="key",
)
self.value = tf.keras.layers.Dense(
self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="value",
)
self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
self.config = config
def transpose_for_scores(self, x, batch_size):
"""Transpose to calculate attention scores."""
x = tf.reshape(
x,
(batch_size, -1, self.num_attention_heads, self.config.attention_head_size),
)
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, inputs, training=False):
"""Call logic."""
hidden_states, attention_mask = inputs
batch_size = tf.shape(hidden_states)[0]
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
dk = tf.cast(
tf.shape(key_layer)[-1], attention_scores.dtype
) # scale attention_scores
attention_scores = attention_scores / tf.math.sqrt(dk)
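        # attention_scores now holds Q K^T / sqrt(d_k); together with the
        # additive mask and softmax below this is standard scaled dot-product
        # attention: Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k) + mask) V.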
if attention_mask is not None:
# extended_attention_masks for self attention encoder.
extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
extended_attention_mask = tf.cast(
extended_attention_mask, attention_scores.dtype
)
extended_attention_mask = (1.0 - extended_attention_mask) * -1e9
attention_scores = attention_scores + extended_attention_mask
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
attention_probs = self.dropout(attention_probs, training=training)
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size))
outputs = (
(context_layer, attention_probs)
if self.output_attentions
else (context_layer,)
)
return outputs
class TFFastSpeechSelfOutput(tf.keras.layers.Layer):
"""Fastspeech output of self attention module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
self.LayerNorm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="LayerNorm"
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, inputs, training=False):
"""Call logic."""
hidden_states, input_tensor = inputs
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFFastSpeechAttention(tf.keras.layers.Layer):
"""Fastspeech attention module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.self_attention = TFFastSpeechSelfAttention(config, name="self")
self.dense_output = TFFastSpeechSelfOutput(config, name="output")
def call(self, inputs, training=False):
input_tensor, attention_mask = inputs
self_outputs = self.self_attention(
[input_tensor, attention_mask], training=training
)
attention_output = self.dense_output(
[self_outputs[0], input_tensor], training=training
)
masked_attention_output = attention_output * tf.cast(
tf.expand_dims(attention_mask, 2), dtype=attention_output.dtype
)
outputs = (masked_attention_output,) + self_outputs[
1:
] # add attentions if we output them
return outputs
class TFFastSpeechIntermediate(tf.keras.layers.Layer):
"""Intermediate representation module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.conv1d_1 = tf.keras.layers.Conv1D(
config.intermediate_size,
kernel_size=config.intermediate_kernel_size,
kernel_initializer=get_initializer(config.initializer_range),
padding="same",
name="conv1d_1",
)
self.conv1d_2 = tf.keras.layers.Conv1D(
config.hidden_size,
kernel_size=config.intermediate_kernel_size,
kernel_initializer=get_initializer(config.initializer_range),
padding="same",
name="conv1d_2",
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def call(self, inputs):
"""Call logic."""
hidden_states, attention_mask = inputs
hidden_states = self.conv1d_1(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.conv1d_2(hidden_states)
masked_hidden_states = hidden_states * tf.cast(
tf.expand_dims(attention_mask, 2), dtype=hidden_states.dtype
)
return masked_hidden_states
class TFFastSpeechOutput(tf.keras.layers.Layer):
"""Output module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.LayerNorm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="LayerNorm"
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, inputs, training=False):
"""Call logic."""
hidden_states, input_tensor = inputs
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFFastSpeechLayer(tf.keras.layers.Layer):
"""Fastspeech module (FFT module on the paper)."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.attention = TFFastSpeechAttention(config, name="attention")
self.intermediate = TFFastSpeechIntermediate(config, name="intermediate")
self.bert_output = TFFastSpeechOutput(config, name="output")
def call(self, inputs, training=False):
"""Call logic."""
hidden_states, attention_mask = inputs
attention_outputs = self.attention(
[hidden_states, attention_mask], training=training
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(
[attention_output, attention_mask], training=training
)
layer_output = self.bert_output(
[intermediate_output, attention_output], training=training
)
masked_layer_output = layer_output * tf.cast(
tf.expand_dims(attention_mask, 2), dtype=layer_output.dtype
)
outputs = (masked_layer_output,) + attention_outputs[
1:
] # add attentions if we output them
return outputs
class TFFastSpeechEncoder(tf.keras.layers.Layer):
"""Fast Speech encoder module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = [
TFFastSpeechLayer(config, name="layer_._{}".format(i))
for i in range(config.num_hidden_layers)
]
def call(self, inputs, training=False):
"""Call logic."""
hidden_states, attention_mask = inputs
all_hidden_states = ()
all_attentions = ()
for _, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
[hidden_states, attention_mask], training=training
)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # outputs, (hidden states), (attentions)
class TFFastSpeechDecoder(TFFastSpeechEncoder):
"""Fast Speech decoder module."""
def __init__(self, config, **kwargs):
self.is_compatible_encoder = kwargs.pop("is_compatible_encoder", True)
super().__init__(config, **kwargs)
self.config = config
# create decoder positional embedding
self.decoder_positional_embeddings = TFEmbedding(
config.max_position_embeddings + 1,
config.hidden_size,
weights=[self._sincos_embedding()],
name="position_embeddings",
trainable=False,
)
if self.is_compatible_encoder is False:
self.project_compatible_decoder = tf.keras.layers.Dense(
units=config.hidden_size, name="project_compatible_decoder"
)
if config.n_speakers > 1:
self.decoder_speaker_embeddings = TFEmbedding(
config.n_speakers,
config.hidden_size,
embeddings_initializer=get_initializer(config.initializer_range),
name="speaker_embeddings",
)
self.speaker_fc = tf.keras.layers.Dense(
units=config.hidden_size, name="speaker_fc"
)
def call(self, inputs, training=False):
hidden_states, speaker_ids, encoder_mask, decoder_pos = inputs
if self.is_compatible_encoder is False:
hidden_states = self.project_compatible_decoder(hidden_states)
# calculate new hidden states.
hidden_states += tf.cast(
self.decoder_positional_embeddings(decoder_pos), hidden_states.dtype
)
if self.config.n_speakers > 1:
speaker_embeddings = self.decoder_speaker_embeddings(speaker_ids)
speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
# extended speaker embeddings
extended_speaker_features = speaker_features[:, tf.newaxis, :]
hidden_states += extended_speaker_features
return super().call([hidden_states, encoder_mask], training=training)
def _sincos_embedding(self):
position_enc = np.array(
[
[
pos / np.power(10000, 2.0 * (i // 2) / self.config.hidden_size)
for i in range(self.config.hidden_size)
]
for pos in range(self.config.max_position_embeddings + 1)
]
)
position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])
position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])
# pad embedding.
position_enc[0] = 0.0
return position_enc
class TFTacotronPostnet(tf.keras.layers.Layer):
"""Tacotron-2 postnet."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.conv_batch_norm = []
for i in range(config.n_conv_postnet):
conv = tf.keras.layers.Conv1D(
filters=config.postnet_conv_filters
if i < config.n_conv_postnet - 1
else config.num_mels,
kernel_size=config.postnet_conv_kernel_sizes,
padding="same",
name="conv_._{}".format(i),
)
batch_norm = tf.keras.layers.BatchNormalization(
axis=-1, name="batch_norm_._{}".format(i)
)
self.conv_batch_norm.append((conv, batch_norm))
self.dropout = tf.keras.layers.Dropout(
rate=config.postnet_dropout_rate, name="dropout"
)
self.activation = [tf.nn.tanh] * (config.n_conv_postnet - 1) + [tf.identity]
def call(self, inputs, training=False):
"""Call logic."""
outputs, mask = inputs
extended_mask = tf.cast(tf.expand_dims(mask, axis=2), outputs.dtype)
for i, (conv, bn) in enumerate(self.conv_batch_norm):
outputs = conv(outputs)
outputs = bn(outputs)
outputs = self.activation[i](outputs)
outputs = self.dropout(outputs, training=training)
return outputs * extended_mask
class TFFastSpeechDurationPredictor(tf.keras.layers.Layer):
"""FastSpeech duration predictor module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.conv_layers = []
for i in range(config.num_duration_conv_layers):
self.conv_layers.append(
tf.keras.layers.Conv1D(
config.duration_predictor_filters,
config.duration_predictor_kernel_sizes,
padding="same",
name="conv_._{}".format(i),
)
)
self.conv_layers.append(
tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="LayerNorm_._{}".format(i)
)
)
self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu6))
self.conv_layers.append(
tf.keras.layers.Dropout(config.duration_predictor_dropout_probs)
)
self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)
self.output_layer = tf.keras.layers.Dense(1)
def call(self, inputs, training=False):
"""Call logic."""
encoder_hidden_states, attention_mask = inputs
attention_mask = tf.cast(
tf.expand_dims(attention_mask, 2), encoder_hidden_states.dtype
)
# mask encoder hidden states
masked_encoder_hidden_states = encoder_hidden_states * attention_mask
# pass though first layer
outputs = self.conv_layers_sequence(masked_encoder_hidden_states)
outputs = self.output_layer(outputs)
masked_outputs = outputs * attention_mask
return tf.squeeze(tf.nn.relu6(masked_outputs), -1) # make sure positive value.
class TFFastSpeechLengthRegulator(tf.keras.layers.Layer):
"""FastSpeech lengthregulator module."""
def __init__(self, config, **kwargs):
"""Init variables."""
self.enable_tflite_convertible = kwargs.pop("enable_tflite_convertible", False)
super().__init__(**kwargs)
self.config = config
def call(self, inputs, training=False):
"""Call logic.
Args:
1. encoder_hidden_states, Tensor (float32) shape [batch_size, length, hidden_size]
2. durations_gt, Tensor (float32/int32) shape [batch_size, length]
"""
encoder_hidden_states, durations_gt = inputs
outputs, encoder_masks = self._length_regulator(
encoder_hidden_states, durations_gt
)
return outputs, encoder_masks
def _length_regulator(self, encoder_hidden_states, durations_gt):
"""Length regulator logic."""
sum_durations = tf.reduce_sum(durations_gt, axis=-1) # [batch_size]
max_durations = tf.reduce_max(sum_durations)
input_shape = tf.shape(encoder_hidden_states)
batch_size = input_shape[0]
hidden_size = input_shape[-1]
# initialize output hidden states and encoder masking.
if self.enable_tflite_convertible:
# There is only 1 batch in inference, so we don't have to use
# `tf.While` op with 3-D output tensor.
repeats = durations_gt[0]
real_length = tf.reduce_sum(repeats)
pad_size = max_durations - real_length
# masks : [max_durations]
masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)
repeat_encoder_hidden_states = tf.repeat(
encoder_hidden_states[0], repeats=repeats, axis=0
)
repeat_encoder_hidden_states = tf.expand_dims(
tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0
) # [1, max_durations, hidden_size]
outputs = repeat_encoder_hidden_states
encoder_masks = masks
else:
outputs = tf.zeros(
shape=[0, max_durations, hidden_size], dtype=encoder_hidden_states.dtype
)
encoder_masks = tf.zeros(shape=[0, max_durations], dtype=tf.int32)
def condition(
i,
batch_size,
outputs,
encoder_masks,
encoder_hidden_states,
durations_gt,
max_durations,
):
return tf.less(i, batch_size)
def body(
i,
batch_size,
outputs,
encoder_masks,
encoder_hidden_states,
durations_gt,
max_durations,
):
repeats = durations_gt[i]
real_length = tf.reduce_sum(repeats)
pad_size = max_durations - real_length
masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)
repeat_encoder_hidden_states = tf.repeat(
encoder_hidden_states[i], repeats=repeats, axis=0
)
repeat_encoder_hidden_states = tf.expand_dims(
tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0
) # [1, max_durations, hidden_size]
outputs = tf.concat([outputs, repeat_encoder_hidden_states], axis=0)
encoder_masks = tf.concat([encoder_masks, masks], axis=0)
return [
i + 1,
batch_size,
outputs,
encoder_masks,
encoder_hidden_states,
durations_gt,
max_durations,
]
# initialize iteration i.
i = tf.constant(0, dtype=tf.int32)
_, _, outputs, encoder_masks, _, _, _, = tf.while_loop(
condition,
body,
[
i,
batch_size,
outputs,
encoder_masks,
encoder_hidden_states,
durations_gt,
max_durations,
],
shape_invariants=[
i.get_shape(),
batch_size.get_shape(),
tf.TensorShape(
[
None,
None,
self.config.encoder_self_attention_params.hidden_size,
]
),
tf.TensorShape([None, None]),
encoder_hidden_states.get_shape(),
durations_gt.get_shape(),
max_durations.get_shape(),
],
)
return outputs, encoder_masks
class TFFastSpeech(BaseModel):
"""TF Fastspeech module."""
def __init__(self, config, **kwargs):
"""Init layers for fastspeech."""
self.enable_tflite_convertible = kwargs.pop("enable_tflite_convertible", False)
super().__init__(**kwargs)
self.embeddings = TFFastSpeechEmbeddings(config, name="embeddings")
self.encoder = TFFastSpeechEncoder(
config.encoder_self_attention_params, name="encoder"
)
self.duration_predictor = TFFastSpeechDurationPredictor(
config, dtype=tf.float32, name="duration_predictor"
)
self.length_regulator = TFFastSpeechLengthRegulator(
config,
enable_tflite_convertible=self.enable_tflite_convertible,
name="length_regulator",
)
self.decoder = TFFastSpeechDecoder(
config.decoder_self_attention_params,
is_compatible_encoder=config.encoder_self_attention_params.hidden_size
== config.decoder_self_attention_params.hidden_size,
name="decoder",
)
self.mel_dense = tf.keras.layers.Dense(
units=config.num_mels, dtype=tf.float32, name="mel_before"
)
self.postnet = TFTacotronPostnet(
config=config, dtype=tf.float32, name="postnet"
)
self.setup_inference_fn()
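        # Forward pipeline (see call/_inference below): charactor embeddings
        # -> FFT encoder -> duration predictor -> length regulator (expand
        # encoder frames to the mel-frame rate) -> FFT decoder -> mel_dense
        # (coarse mel) -> postnet residual (refined mel).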
def _build(self):
"""Dummy input for building model."""
# fake inputs
input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
speaker_ids = tf.convert_to_tensor([0], tf.int32)
duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
self(input_ids, speaker_ids, duration_gts)
def resize_positional_embeddings(self, new_size):
self.embeddings.resize_positional_embeddings(new_size)
self._build()
def call(
self, input_ids, speaker_ids, duration_gts, training=False, **kwargs,
):
"""Call logic."""
attention_mask = tf.math.not_equal(input_ids, 0)
embedding_output = self.embeddings([input_ids, speaker_ids], training=training)
encoder_output = self.encoder(
[embedding_output, attention_mask], training=training
)
last_encoder_hidden_states = encoder_output[0]
        # Duration predictor: only last_encoder_hidden_states is used here; more
        # encoder hidden-state layers could be fed to the duration predictor instead.
duration_outputs = self.duration_predictor(
[last_encoder_hidden_states, attention_mask]
) # [batch_size, length]
length_regulator_outputs, encoder_masks = self.length_regulator(
[last_encoder_hidden_states, duration_gts], training=training
)
# create decoder positional embedding
decoder_pos = tf.range(
1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
)
masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks
decoder_output = self.decoder(
[length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
training=training,
)
last_decoder_hidden_states = decoder_output[0]
        # The sum or concatenation of several decoder hidden-state layers could be used here instead of only the last one.
mel_before = self.mel_dense(last_decoder_hidden_states)
mel_after = (
self.postnet([mel_before, encoder_masks], training=training) + mel_before
)
outputs = (mel_before, mel_after, duration_outputs)
return outputs
def _inference(self, input_ids, speaker_ids, speed_ratios, **kwargs):
"""Call logic."""
attention_mask = tf.math.not_equal(input_ids, 0)
embedding_output = self.embeddings([input_ids, speaker_ids], training=False)
encoder_output = self.encoder(
[embedding_output, attention_mask], training=False
)
last_encoder_hidden_states = encoder_output[0]
        # Duration predictor: only last_encoder_hidden_states is used here; more
        # encoder hidden-state layers could be fed to the duration predictor instead.
duration_outputs = self.duration_predictor(
[last_encoder_hidden_states, attention_mask]
) # [batch_size, length]
# duration_outputs = tf.math.exp(duration_outputs) - 1.0 # fix
if speed_ratios is None:
speed_ratios = tf.convert_to_tensor(np.array([1.0]), dtype=tf.float32)
speed_ratios = tf.expand_dims(speed_ratios, 1)
duration_outputs = tf.cast(
tf.math.round(duration_outputs * speed_ratios), tf.int32
)
length_regulator_outputs, encoder_masks = self.length_regulator(
[last_encoder_hidden_states, duration_outputs], training=False
)
# create decoder positional embedding
decoder_pos = tf.range(
1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
)
masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks
decoder_output = self.decoder(
[length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
training=False,
)
last_decoder_hidden_states = decoder_output[0]
        # The sum or concatenation of several decoder hidden-state layers could be used here instead of only the last one.
mel_before = self.mel_dense(last_decoder_hidden_states)
mel_after = (
self.postnet([mel_before, encoder_masks], training=False) + mel_before
)
outputs = (mel_before, mel_after, duration_outputs)
return outputs
def setup_inference_fn(self):
self.inference = tf.function(
self._inference,
experimental_relax_shapes=True,
input_signature=[
tf.TensorSpec(shape=[None, None], dtype=tf.int32, name="input_ids"),
tf.TensorSpec(shape=[None,], dtype=tf.int32, name="speaker_ids"),
tf.TensorSpec(shape=[None,], dtype=tf.float32, name="speed_ratios"),
],
)
self.inference_tflite = tf.function(
self._inference,
experimental_relax_shapes=True,
input_signature=[
tf.TensorSpec(shape=[1, None], dtype=tf.int32, name="input_ids"),
tf.TensorSpec(shape=[1,], dtype=tf.int32, name="speaker_ids"),
tf.TensorSpec(shape=[1,], dtype=tf.float32, name="speed_ratios"),
],
)
| 37.381738 | 102 | 0.615686 |
e6ca616c63b56737940af6232e7e6088f3026a18 | 42,423 | py | Python | tools/eval_rcnn.py | kaancolak/PointRCNN | eeca9bef79baaebfef3c45b1ea929168e37c3071 | [
"MIT"
] | null | null | null | tools/eval_rcnn.py | kaancolak/PointRCNN | eeca9bef79baaebfef3c45b1ea929168e37c3071 | [
"MIT"
] | null | null | null | tools/eval_rcnn.py | kaancolak/PointRCNN | eeca9bef79baaebfef3c45b1ea929168e37c3071 | [
"MIT"
] | null | null | null | import _init_path
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from lib.net.point_rcnn import PointRCNN
from lib.datasets.kitti_rcnn_dataset import KittiRCNNDataset
import tools.train_utils.train_utils as train_utils
from lib.utils.bbox_transform import decode_bbox_target
from tools.kitti_object_eval_python.evaluate import evaluate as kitti_evaluate
from lib.config import cfg, cfg_from_file, save_config_to_file, cfg_from_list
import argparse
import lib.utils.kitti_utils as kitti_utils
import lib.utils.iou3d.iou3d_utils as iou3d_utils
from datetime import datetime
import logging
import re
import glob
import time
from tensorboardX import SummaryWriter
import tqdm
np.random.seed(1024) # set the same seed
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument('--cfg_file', type=str, default='cfgs/default.yml', help='specify the config for evaluation')
parser.add_argument("--eval_mode", type=str, default='rpn', required=True, help="specify the evaluation mode")
parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
parser.add_argument('--test', action='store_true', default=False, help='evaluate without ground truth')
parser.add_argument("--ckpt", type=str, default=None, help="specify a checkpoint to be evaluated")
parser.add_argument("--rpn_ckpt", type=str, default=None, help="specify the checkpoint of rpn if trained separated")
parser.add_argument("--rcnn_ckpt", type=str, default=None, help="specify the checkpoint of rcnn if trained separated")
parser.add_argument('--batch_size', type=int, default=1, help='batch size for evaluation')
parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
parser.add_argument("--extra_tag", type=str, default='default', help="extra tag for multiple evaluation")
parser.add_argument('--output_dir', type=str, default=None, help='specify an output directory if needed')
parser.add_argument("--ckpt_dir", type=str, default=None, help="specify a ckpt directory to be evaluated if needed")
parser.add_argument('--save_result', action='store_true', default=False, help='save evaluation results to files')
parser.add_argument('--save_rpn_feature', action='store_true', default=False,
help='save features for separately rcnn training and evaluation')
parser.add_argument('--random_select', action='store_true', default=True, help='sample to the same number of points')
parser.add_argument('--start_epoch', default=0, type=int, help='ignore the checkpoint smaller than this epoch')
parser.add_argument("--rcnn_eval_roi_dir", type=str, default=None,
help='specify the saved rois for rcnn evaluation when using rcnn_offline mode')
parser.add_argument("--rcnn_eval_feature_dir", type=str, default=None,
help='specify the saved features for rcnn evaluation when using rcnn_offline mode')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
help='set extra config keys if needed')
args = parser.parse_args()
def create_logger(log_file):
log_format = '%(asctime)s %(levelname)5s %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format, filename=log_file)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter(log_format))
logging.getLogger(__name__).addHandler(console)
return logging.getLogger(__name__)
def save_kitti_format(sample_id, calib, bbox3d, kitti_output_dir, scores, img_shape):
corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)
img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)
img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)
img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)
img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)
img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)
img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]
img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]
box_valid_mask = np.logical_and(img_boxes_w < img_shape[1] * 0.8, img_boxes_h < img_shape[0] * 0.8)
kitti_output_file = os.path.join(kitti_output_dir, '%06d.txt' % sample_id)
with open(kitti_output_file, 'w') as f:
for k in range(bbox3d.shape[0]):
if box_valid_mask[k] == 0:
continue
x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]
beta = np.arctan2(z, x)
alpha = -np.sign(beta) * np.pi / 2 + beta + ry
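            # alpha is the KITTI observation angle: the global yaw ry shifted
            # by the viewing direction of the box centre, arctan2(z, x).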
print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %
(cfg.CLASSES, alpha, img_boxes[k, 0], img_boxes[k, 1], img_boxes[k, 2], img_boxes[k, 3],
bbox3d[k, 3], bbox3d[k, 4], bbox3d[k, 5], bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],
bbox3d[k, 6], scores[k]), file=f)
def save_rpn_features(seg_result, rpn_scores_raw, pts_features, backbone_xyz, backbone_features, kitti_features_dir,
sample_id):
pts_intensity = pts_features[:, 0]
output_file = os.path.join(kitti_features_dir, '%06d.npy' % sample_id)
xyz_file = os.path.join(kitti_features_dir, '%06d_xyz.npy' % sample_id)
seg_file = os.path.join(kitti_features_dir, '%06d_seg.npy' % sample_id)
intensity_file = os.path.join(kitti_features_dir, '%06d_intensity.npy' % sample_id)
np.save(output_file, backbone_features)
np.save(xyz_file, backbone_xyz)
np.save(seg_file, seg_result)
np.save(intensity_file, pts_intensity)
rpn_scores_raw_file = os.path.join(kitti_features_dir, '%06d_rawscore.npy' % sample_id)
np.save(rpn_scores_raw_file, rpn_scores_raw)
def eval_one_epoch_rpn(model, dataloader, epoch_id, result_dir, logger):
np.random.seed(1024)
mode = 'TEST' if args.test else 'EVAL'
if args.save_rpn_feature:
kitti_features_dir = os.path.join(result_dir, 'features')
os.makedirs(kitti_features_dir, exist_ok=True)
if args.save_result or args.save_rpn_feature:
kitti_output_dir = os.path.join(result_dir, 'detections', 'data')
seg_output_dir = os.path.join(result_dir, 'seg_result')
os.makedirs(kitti_output_dir, exist_ok=True)
os.makedirs(seg_output_dir, exist_ok=True)
logger.info('---- EPOCH %s RPN EVALUATION ----' % epoch_id)
model.eval()
thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
dataset = dataloader.dataset
cnt = max_num = rpn_iou_avg = 0
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')
for data in dataloader:
sample_id_list, pts_rect, pts_features, pts_input = \
data['sample_id'], data['pts_rect'], data['pts_features'], data['pts_input']
sample_id = sample_id_list[0]
cnt += len(sample_id_list)
if not args.test:
rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']
gt_boxes3d = data['gt_boxes3d']
rpn_cls_label = torch.from_numpy(rpn_cls_label).cuda(non_blocking=True).long()
if gt_boxes3d.shape[1] == 0: # (B, M, 7)
pass
# logger.info('%06d: No gt box' % sample_id)
else:
gt_boxes3d = torch.from_numpy(gt_boxes3d).cuda(non_blocking=True).float()
inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
input_data = {'pts_input': inputs}
# model inference
ret_dict = model(input_data)
rpn_cls, rpn_reg = ret_dict['rpn_cls'], ret_dict['rpn_reg']
backbone_xyz, backbone_features = ret_dict['backbone_xyz'], ret_dict['backbone_features']
rpn_scores_raw = rpn_cls[:, :, 0]
rpn_scores = torch.sigmoid(rpn_scores_raw)
seg_result = (rpn_scores > cfg.RPN.SCORE_THRESH).long()
# proposal layer
rois, roi_scores_raw = model.rpn.proposal_layer(rpn_scores_raw, rpn_reg, backbone_xyz) # (B, M, 7)
batch_size = rois.shape[0]
# calculate recall and save results to file
for bs_idx in range(batch_size):
cur_sample_id = sample_id_list[bs_idx]
cur_scores_raw = roi_scores_raw[bs_idx] # (N)
cur_boxes3d = rois[bs_idx] # (N, 7)
cur_seg_result = seg_result[bs_idx]
cur_pts_rect = pts_rect[bs_idx]
# calculate recall
if not args.test:
cur_rpn_cls_label = rpn_cls_label[bs_idx]
cur_gt_boxes3d = gt_boxes3d[bs_idx]
k = cur_gt_boxes3d.__len__() - 1
while k > 0 and cur_gt_boxes3d[k].sum() == 0:
k -= 1
cur_gt_boxes3d = cur_gt_boxes3d[:k + 1]
recalled_num = 0
if cur_gt_boxes3d.shape[0] > 0:
iou3d = iou3d_utils.boxes_iou3d_gpu(cur_boxes3d, cur_gt_boxes3d[:, 0:7])
gt_max_iou, _ = iou3d.max(dim=0)
for idx, thresh in enumerate(thresh_list):
total_recalled_bbox_list[idx] += (gt_max_iou > thresh).sum().item()
recalled_num = (gt_max_iou > 0.7).sum().item()
total_gt_bbox += cur_gt_boxes3d.__len__()
fg_mask = cur_rpn_cls_label > 0
correct = ((cur_seg_result == cur_rpn_cls_label) & fg_mask).sum().float()
union = fg_mask.sum().float() + (cur_seg_result > 0).sum().float() - correct
rpn_iou = correct / torch.clamp(union, min=1.0)
rpn_iou_avg += rpn_iou.item()
# save result
if args.save_rpn_feature:
# save features to file
save_rpn_features(seg_result[bs_idx].float().cpu().numpy(),
rpn_scores_raw[bs_idx].float().cpu().numpy(),
pts_features[bs_idx],
backbone_xyz[bs_idx].cpu().numpy(),
backbone_features[bs_idx].cpu().numpy().transpose(1, 0),
kitti_features_dir, cur_sample_id)
if args.save_result or args.save_rpn_feature:
cur_pred_cls = cur_seg_result.cpu().numpy()
output_file = os.path.join(seg_output_dir, '%06d.npy' % cur_sample_id)
if not args.test:
cur_gt_cls = cur_rpn_cls_label.cpu().numpy()
output_data = np.concatenate(
(cur_pts_rect.reshape(-1, 3), cur_gt_cls.reshape(-1, 1), cur_pred_cls.reshape(-1, 1)), axis=1)
else:
output_data = np.concatenate((cur_pts_rect.reshape(-1, 3), cur_pred_cls.reshape(-1, 1)), axis=1)
np.save(output_file, output_data.astype(np.float16))
# save as kitti format
calib = dataset.get_calib(cur_sample_id)
cur_boxes3d = cur_boxes3d.cpu().numpy()
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, cur_boxes3d, kitti_output_dir, cur_scores_raw, image_shape)
disp_dict = {'mode': mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox),
'rpn_iou': rpn_iou_avg / max(cnt, 1.0)}
progress_bar.set_postfix(disp_dict)
progress_bar.update()
progress_bar.close()
logger.info(str(datetime.now()))
logger.info('-------------------performance of epoch %s---------------------' % epoch_id)
logger.info('max number of objects: %d' % max_num)
logger.info('rpn iou avg: %f' % (rpn_iou_avg / max(cnt, 1.0)))
ret_dict = {'max_obj_num': max_num, 'rpn_iou': rpn_iou_avg / cnt}
for idx, thresh in enumerate(thresh_list):
cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
logger.info('total bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_recalled_bbox_list[idx],
total_gt_bbox, cur_recall))
ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_recall
logger.info('result is saved to: %s' % result_dir)
return ret_dict
def eval_one_epoch_rcnn(model, dataloader, epoch_id, result_dir, logger):
np.random.seed(1024)
MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
mode = 'TEST' if args.test else 'EVAL'
final_output_dir = os.path.join(result_dir, 'final_result', 'data')
os.makedirs(final_output_dir, exist_ok=True)
if args.save_result:
roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')
refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')
os.makedirs(roi_output_dir, exist_ok=True)
os.makedirs(refine_output_dir, exist_ok=True)
logger.info('---- EPOCH %s RCNN EVALUATION ----' % epoch_id)
model.eval()
thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
total_roi_recalled_bbox_list = [0] * 5
dataset = dataloader.dataset
cnt = final_total = total_cls_acc = total_cls_acc_refined = 0
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')
for data in dataloader:
sample_id = data['sample_id']
cnt += 1
assert args.batch_size == 1, 'Only support bs=1 here'
input_data = {}
for key, val in data.items():
if key != 'sample_id':
input_data[key] = torch.from_numpy(val).contiguous().cuda(non_blocking=True).float()
roi_boxes3d = input_data['roi_boxes3d']
roi_scores = input_data['roi_scores']
if cfg.RCNN.ROI_SAMPLE_JIT:
for key, val in input_data.items():
if key in ['gt_iou', 'gt_boxes3d']:
continue
input_data[key] = input_data[key].unsqueeze(dim=0)
else:
pts_input = torch.cat((input_data['pts_input'], input_data['pts_features']), dim=-1)
input_data['pts_input'] = pts_input
ret_dict = model(input_data)
rcnn_cls = ret_dict['rcnn_cls']
rcnn_reg = ret_dict['rcnn_reg']
# bounding box regression
anchor_size = MEAN_SIZE
if cfg.RCNN.SIZE_RES_ON_ROI:
roi_size = input_data['roi_size']
anchor_size = roi_size
pred_boxes3d = decode_bbox_target(roi_boxes3d, rcnn_reg,
anchor_size=anchor_size,
loc_scope=cfg.RCNN.LOC_SCOPE,
loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,
num_head_bin=cfg.RCNN.NUM_HEAD_BIN,
get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,
loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,
get_ry_fine=True)
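        # decode_bbox_target converts the bin/residual regression output back
        # into absolute 3D boxes (x, y, z, h, w, l, ry) anchored on each ROI.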
# scoring
if rcnn_cls.shape[1] == 1:
raw_scores = rcnn_cls.view(-1)
norm_scores = torch.sigmoid(raw_scores)
pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()
else:
pred_classes = torch.argmax(rcnn_cls, dim=1).view(-1)
cls_norm_scores = F.softmax(rcnn_cls, dim=1)
raw_scores = rcnn_cls[:, pred_classes]
norm_scores = cls_norm_scores[:, pred_classes]
# evaluation
disp_dict = {'mode': mode}
if not args.test:
gt_boxes3d = input_data['gt_boxes3d']
gt_iou = input_data['gt_iou']
# calculate recall
gt_num = gt_boxes3d.shape[0]
if gt_num > 0:
iou3d = iou3d_utils.boxes_iou3d_gpu(pred_boxes3d, gt_boxes3d)
gt_max_iou, _ = iou3d.max(dim=0)
refined_iou, _ = iou3d.max(dim=1)
for idx, thresh in enumerate(thresh_list):
total_recalled_bbox_list[idx] += (gt_max_iou > thresh).sum().item()
recalled_num = (gt_max_iou > 0.7).sum().item()
total_gt_bbox += gt_num
iou3d_in = iou3d_utils.boxes_iou3d_gpu(roi_boxes3d, gt_boxes3d)
gt_max_iou_in, _ = iou3d_in.max(dim=0)
for idx, thresh in enumerate(thresh_list):
total_roi_recalled_bbox_list[idx] += (gt_max_iou_in > thresh).sum().item()
# classification accuracy
cls_label = (gt_iou > cfg.RCNN.CLS_FG_THRESH).float()
cls_valid_mask = ((gt_iou >= cfg.RCNN.CLS_FG_THRESH) | (gt_iou <= cfg.RCNN.CLS_BG_THRESH)).float()
cls_acc = ((pred_classes == cls_label.long()).float() * cls_valid_mask).sum() / max(cls_valid_mask.sum(), 1.0)
iou_thresh = 0.7 if cfg.CLASSES == 'Car' else 0.5
cls_label_refined = (gt_iou >= iou_thresh).float()
cls_acc_refined = (pred_classes == cls_label_refined.long()).float().sum() / max(cls_label_refined.shape[0], 1.0)
total_cls_acc += cls_acc.item()
total_cls_acc_refined += cls_acc_refined.item()
disp_dict['recall'] = '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)
disp_dict['cls_acc_refined'] = '%.2f' % cls_acc_refined.item()
progress_bar.set_postfix(disp_dict)
progress_bar.update()
image_shape = dataset.get_image_shape(sample_id)
if args.save_result:
# save roi and refine results
roi_boxes3d_np = roi_boxes3d.cpu().numpy()
pred_boxes3d_np = pred_boxes3d.cpu().numpy()
calib = dataset.get_calib(sample_id)
save_kitti_format(sample_id, calib, roi_boxes3d_np, roi_output_dir, roi_scores, image_shape)
save_kitti_format(sample_id, calib, pred_boxes3d_np, refine_output_dir, raw_scores.cpu().numpy(),
image_shape)
# NMS and scoring
# scores thresh
inds = norm_scores > cfg.RCNN.SCORE_THRESH
if inds.sum() == 0:
continue
pred_boxes3d_selected = pred_boxes3d[inds]
raw_scores_selected = raw_scores[inds]
# NMS thresh
boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(pred_boxes3d_selected)
keep_idx = iou3d_utils.nms_gpu(boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH)
pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]
scores_selected = raw_scores_selected[keep_idx]
pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu().numpy(), scores_selected.cpu().numpy()
calib = dataset.get_calib(sample_id)
final_total += pred_boxes3d_selected.shape[0]
save_kitti_format(sample_id, calib, pred_boxes3d_selected, final_output_dir, scores_selected, image_shape)
progress_bar.close()
# dump empty files
split_file = os.path.join(dataset.imageset_dir, '..', '..', 'ImageSets', dataset.split + '.txt')
split_file = os.path.abspath(split_file)
image_idx_list = [x.strip() for x in open(split_file).readlines()]
empty_cnt = 0
for k in range(image_idx_list.__len__()):
cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])
if not os.path.exists(cur_file):
with open(cur_file, 'w') as temp_f:
pass
empty_cnt += 1
logger.info('empty_cnt=%d: dump empty file %s' % (empty_cnt, cur_file))
ret_dict = {'empty_cnt': empty_cnt}
logger.info('-------------------performance of epoch %s---------------------' % epoch_id)
logger.info(str(datetime.now()))
avg_cls_acc = (total_cls_acc / max(cnt, 1.0))
avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))
avg_det_num = (final_total / max(cnt, 1.0))
logger.info('final average detections: %.3f' % avg_det_num)
logger.info('final average cls acc: %.3f' % avg_cls_acc)
logger.info('final average cls acc refined: %.3f' % avg_cls_acc_refined)
ret_dict['rcnn_cls_acc'] = avg_cls_acc
ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined
ret_dict['rcnn_avg_num'] = avg_det_num
for idx, thresh in enumerate(thresh_list):
cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
logger.info('total roi bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_roi_recalled_bbox_list[idx],
total_gt_bbox, cur_roi_recall))
ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall
for idx, thresh in enumerate(thresh_list):
cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
logger.info('total bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_recalled_bbox_list[idx],
total_gt_bbox, cur_recall))
ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall
if cfg.TEST.SPLIT != 'test':
        logger.info('Average Precision:')
name_to_class = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file=split_file,
current_class=name_to_class[cfg.CLASSES])
logger.info(ap_result_str)
ret_dict.update(ap_dict)
logger.info('result is saved to: %s' % result_dir)
return ret_dict
def eval_one_epoch_joint(model, dataloader, epoch_id, result_dir, logger):
np.random.seed(666)
MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
mode = 'TEST' if args.test else 'EVAL'
final_output_dir = os.path.join(result_dir, 'final_result', 'data')
os.makedirs(final_output_dir, exist_ok=True)
if args.save_result:
roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')
refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')
rpn_output_dir = os.path.join(result_dir, 'rpn_result', 'data')
os.makedirs(rpn_output_dir, exist_ok=True)
os.makedirs(roi_output_dir, exist_ok=True)
os.makedirs(refine_output_dir, exist_ok=True)
logger.info('---- EPOCH %s JOINT EVALUATION ----' % epoch_id)
logger.info('==> Output file: %s' % result_dir)
model.eval()
thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
total_roi_recalled_bbox_list = [0] * 5
dataset = dataloader.dataset
cnt = final_total = total_cls_acc = total_cls_acc_refined = total_rpn_iou = 0
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')
    for data in dataloader:
        cnt += 1
        sample_id, pts_rect, pts_features, pts_input = \
            data['sample_id'], data['pts_rect'], data['pts_features'], data['pts_input']
        batch_size = len(sample_id)
        inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
        input_data = {'pts_input': inputs}
        # model inference
        ret_dict = model(input_data)
        roi_scores_raw = ret_dict['roi_scores_raw']  # (B, M)
        roi_boxes3d = ret_dict['rois']  # (B, M, 7)
        seg_result = ret_dict['seg_result'].long()  # (B, N)
rcnn_cls = ret_dict['rcnn_cls'].view(batch_size, -1, ret_dict['rcnn_cls'].shape[1])
rcnn_reg = ret_dict['rcnn_reg'].view(batch_size, -1, ret_dict['rcnn_reg'].shape[1]) # (B, M, C)
# bounding box regression
anchor_size = MEAN_SIZE
if cfg.RCNN.SIZE_RES_ON_ROI:
assert False
pred_boxes3d = decode_bbox_target(roi_boxes3d.view(-1, 7), rcnn_reg.view(-1, rcnn_reg.shape[-1]),
anchor_size=anchor_size,
loc_scope=cfg.RCNN.LOC_SCOPE,
loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,
num_head_bin=cfg.RCNN.NUM_HEAD_BIN,
get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,
loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,
get_ry_fine=True).view(batch_size, -1, 7)
# print(pred_boxes3d)
# scoring
if rcnn_cls.shape[2] == 1:
raw_scores = rcnn_cls # (B, M, 1)
norm_scores = torch.sigmoid(raw_scores)
pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()
else:
pred_classes = torch.argmax(rcnn_cls, dim=1).view(-1)
cls_norm_scores = F.softmax(rcnn_cls, dim=1)
raw_scores = rcnn_cls[:, pred_classes]
norm_scores = cls_norm_scores[:, pred_classes]
# evaluation
recalled_num = gt_num = rpn_iou = 0
if not args.test:
if not cfg.RPN.FIXED:
rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']
rpn_cls_label = torch.from_numpy(rpn_cls_label).cuda(non_blocking=True).long()
gt_boxes3d = data['gt_boxes3d']
for k in range(batch_size):
# calculate recall
cur_gt_boxes3d = gt_boxes3d[k]
tmp_idx = cur_gt_boxes3d.__len__() - 1
while tmp_idx >= 0 and cur_gt_boxes3d[tmp_idx].sum() == 0:
tmp_idx -= 1
if tmp_idx >= 0:
cur_gt_boxes3d = cur_gt_boxes3d[:tmp_idx + 1]
cur_gt_boxes3d = torch.from_numpy(cur_gt_boxes3d).cuda(non_blocking=True).float()
iou3d = iou3d_utils.boxes_iou3d_gpu(pred_boxes3d[k], cur_gt_boxes3d)
gt_max_iou, _ = iou3d.max(dim=0)
refined_iou, _ = iou3d.max(dim=1)
for idx, thresh in enumerate(thresh_list):
total_recalled_bbox_list[idx] += (gt_max_iou > thresh).sum().item()
recalled_num += (gt_max_iou > 0.7).sum().item()
gt_num += cur_gt_boxes3d.shape[0]
total_gt_bbox += cur_gt_boxes3d.shape[0]
# original recall
iou3d_in = iou3d_utils.boxes_iou3d_gpu(roi_boxes3d[k], cur_gt_boxes3d)
gt_max_iou_in, _ = iou3d_in.max(dim=0)
for idx, thresh in enumerate(thresh_list):
total_roi_recalled_bbox_list[idx] += (gt_max_iou_in > thresh).sum().item()
if not cfg.RPN.FIXED:
fg_mask = rpn_cls_label > 0
correct = ((seg_result == rpn_cls_label) & fg_mask).sum().float()
union = fg_mask.sum().float() + (seg_result > 0).sum().float() - correct
rpn_iou = correct / torch.clamp(union, min=1.0)
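                # rpn_iou is the point-wise foreground IoU of the RPN
                # segmentation: intersection over union of the predicted and
                # ground-truth foreground point sets.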
total_rpn_iou += rpn_iou.item()
disp_dict = {'mode': mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)}
progress_bar.set_postfix(disp_dict)
progress_bar.update()
if args.save_result:
# save roi and refine results
roi_boxes3d_np = roi_boxes3d.cpu().numpy()
pred_boxes3d_np = pred_boxes3d.cpu().numpy()
roi_scores_raw_np = roi_scores_raw.cpu().numpy()
raw_scores_np = raw_scores.cpu().numpy()
rpn_cls_np = ret_dict['rpn_cls'].cpu().numpy()
rpn_xyz_np = ret_dict['backbone_xyz'].cpu().numpy()
seg_result_np = seg_result.cpu().numpy()
output_data = np.concatenate((rpn_xyz_np, rpn_cls_np.reshape(batch_size, -1, 1),
seg_result_np.reshape(batch_size, -1, 1)), axis=2)
for k in range(batch_size):
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, roi_boxes3d_np[k], roi_output_dir,
roi_scores_raw_np[k], image_shape)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_np[k], refine_output_dir,
raw_scores_np[k], image_shape)
output_file = os.path.join(rpn_output_dir, '%06d.npy' % cur_sample_id)
np.save(output_file, output_data.astype(np.float32))
# scores thresh
inds = norm_scores > cfg.RCNN.SCORE_THRESH
for k in range(batch_size):
cur_inds = inds[k].view(-1)
if cur_inds.sum() == 0:
continue
pred_boxes3d_selected = pred_boxes3d[k, cur_inds]
raw_scores_selected = raw_scores[k, cur_inds]
norm_scores_selected = norm_scores[k, cur_inds]
# NMS thresh
# rotated nms
boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(pred_boxes3d_selected)
keep_idx = iou3d_utils.nms_gpu(boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH).view(-1)
pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]
scores_selected = raw_scores_selected[keep_idx]
pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu().numpy(), scores_selected.cpu().numpy()
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
final_total += pred_boxes3d_selected.shape[0]
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_selected, final_output_dir, scores_selected, image_shape)
progress_bar.close()
# dump empty files
split_file = os.path.join(dataset.imageset_dir, '..', '..', 'ImageSets', dataset.split + '.txt')
split_file = os.path.abspath(split_file)
image_idx_list = [x.strip() for x in open(split_file).readlines()]
empty_cnt = 0
for k in range(image_idx_list.__len__()):
cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])
if not os.path.exists(cur_file):
with open(cur_file, 'w') as temp_f:
pass
empty_cnt += 1
logger.info('empty_cnt=%d: dump empty file %s' % (empty_cnt, cur_file))
ret_dict = {'empty_cnt': empty_cnt}
logger.info('-------------------performance of epoch %s---------------------' % epoch_id)
logger.info(str(datetime.now()))
avg_rpn_iou = (total_rpn_iou / max(cnt, 1.0))
avg_cls_acc = (total_cls_acc / max(cnt, 1.0))
avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))
avg_det_num = (final_total / max(len(dataset), 1.0))
logger.info('final average detections: %.3f' % avg_det_num)
logger.info('final average rpn_iou refined: %.3f' % avg_rpn_iou)
logger.info('final average cls acc: %.3f' % avg_cls_acc)
logger.info('final average cls acc refined: %.3f' % avg_cls_acc_refined)
ret_dict['rpn_iou'] = avg_rpn_iou
ret_dict['rcnn_cls_acc'] = avg_cls_acc
ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined
ret_dict['rcnn_avg_num'] = avg_det_num
for idx, thresh in enumerate(thresh_list):
cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
logger.info('total roi bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_roi_recalled_bbox_list[idx],
total_gt_bbox, cur_roi_recall))
ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall
for idx, thresh in enumerate(thresh_list):
cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
logger.info('total bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_recalled_bbox_list[idx],
total_gt_bbox, cur_recall))
ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall
if cfg.TEST.SPLIT != 'test':
        logger.info('Average Precision:')
name_to_class = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file=split_file,
current_class=name_to_class[cfg.CLASSES])
logger.info(ap_result_str)
ret_dict.update(ap_dict)
logger.info('result is saved to: %s' % result_dir)
return ret_dict
def eval_one_epoch(model, dataloader, epoch_id, result_dir, logger):
if cfg.RPN.ENABLED and not cfg.RCNN.ENABLED:
ret_dict = eval_one_epoch_rpn(model, dataloader, epoch_id, result_dir, logger)
elif not cfg.RPN.ENABLED and cfg.RCNN.ENABLED:
ret_dict = eval_one_epoch_rcnn(model, dataloader, epoch_id, result_dir, logger)
elif cfg.RPN.ENABLED and cfg.RCNN.ENABLED:
ret_dict = eval_one_epoch_joint(model, dataloader, epoch_id, result_dir, logger)
else:
raise NotImplementedError
return ret_dict
def load_part_ckpt(model, filename, logger, total_keys=-1):
if os.path.isfile(filename):
logger.info("==> Loading part model from checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
model_state = checkpoint['model_state']
update_model_state = {key: val for key, val in model_state.items() if key in model.state_dict()}
state_dict = model.state_dict()
state_dict.update(update_model_state)
model.load_state_dict(state_dict)
update_keys = update_model_state.keys().__len__()
if update_keys == 0:
raise RuntimeError
logger.info("==> Done (loaded %d/%d)" % (update_keys, total_keys))
else:
raise FileNotFoundError
def load_ckpt_based_on_args(model, logger):
if args.ckpt is not None:
train_utils.load_checkpoint(model, filename=args.ckpt, logger=logger)
total_keys = model.state_dict().keys().__len__()
if cfg.RPN.ENABLED and args.rpn_ckpt is not None:
load_part_ckpt(model, filename=args.rpn_ckpt, logger=logger, total_keys=total_keys)
if cfg.RCNN.ENABLED and args.rcnn_ckpt is not None:
load_part_ckpt(model, filename=args.rcnn_ckpt, logger=logger, total_keys=total_keys)
def eval_single_ckpt(root_result_dir):
root_result_dir = os.path.join(root_result_dir, 'eval')
# set epoch_id and output dir
num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
root_result_dir = os.path.join(root_result_dir, 'epoch_%s' % epoch_id, cfg.TEST.SPLIT)
if args.test:
root_result_dir = os.path.join(root_result_dir, 'test_mode')
if args.extra_tag != 'default':
root_result_dir = os.path.join(root_result_dir, args.extra_tag)
os.makedirs(root_result_dir, exist_ok=True)
log_file = os.path.join(root_result_dir, 'log_eval_one.txt')
logger = create_logger(log_file)
logger.info('**********************Start logging**********************')
for key, val in vars(args).items():
logger.info("{:16} {}".format(key, val))
save_config_to_file(cfg, logger=logger)
# create dataloader & network
test_loader = create_dataloader(logger)
model = PointRCNN(num_classes=test_loader.dataset.num_class, use_xyz=True, mode='TEST')
model.cuda()
# copy important files to backup
backup_dir = os.path.join(root_result_dir, 'backup_files')
os.makedirs(backup_dir, exist_ok=True)
os.system('cp *.py %s/' % backup_dir)
os.system('cp ../lib/net/*.py %s/' % backup_dir)
os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)
# load checkpoint
load_ckpt_based_on_args(model, logger)
# start evaluation
eval_one_epoch(model, test_loader, epoch_id, root_result_dir, logger)
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file):
ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
ckpt_list.sort(key=os.path.getmtime)
evaluated_ckpt_list = [float(x.strip()) for x in open(ckpt_record_file, 'r').readlines()]
for cur_ckpt in ckpt_list:
num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
if num_list.__len__() == 0:
continue
epoch_id = num_list[-1]
if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
return epoch_id, cur_ckpt
return -1, None
def repeat_eval_ckpt(root_result_dir, ckpt_dir):
root_result_dir = os.path.join(root_result_dir, 'eval', 'eval_all_' + args.extra_tag)
os.makedirs(root_result_dir, exist_ok=True)
log_file = os.path.join(root_result_dir, 'log_eval_all_%s.txt' % cfg.TEST.SPLIT)
logger = create_logger(log_file)
logger.info('**********************Start logging**********************')
# save config
for key, val in vars(args).items():
logger.info("{:16} {}".format(key, val))
save_config_to_file(cfg, logger=logger)
# create dataloader & network
test_loader = create_dataloader(logger)
model = PointRCNN(num_classes=test_loader.dataset.num_class, use_xyz=True, mode='TEST')
model.cuda()
# copy important files to backup
backup_dir = os.path.join(root_result_dir, 'backup_files')
os.makedirs(backup_dir, exist_ok=True)
os.system('cp *.py %s/' % backup_dir)
os.system('cp ../lib/net/*.py %s/' % backup_dir)
os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)
# evaluated ckpt record
ckpt_record_file = os.path.join(root_result_dir, 'eval_list_%s.txt' % cfg.TEST.SPLIT)
with open(ckpt_record_file, 'a'):
pass
# tensorboard log
tb_log = SummaryWriter(log_dir=os.path.join(root_result_dir, 'tensorboard_%s' % cfg.TEST.SPLIT))
while True:
# check whether there is checkpoint which is not evaluated
cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file)
if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
wait_second = 30
print('Wait %s second for next check: %s' % (wait_second, ckpt_dir))
time.sleep(wait_second)
continue
# load checkpoint
train_utils.load_checkpoint(model, filename=cur_ckpt)
# start evaluation
cur_result_dir = os.path.join(root_result_dir, 'epoch_%s' % cur_epoch_id, cfg.TEST.SPLIT)
tb_dict = eval_one_epoch(model, test_loader, cur_epoch_id, cur_result_dir, logger)
step = int(float(cur_epoch_id))
if step == float(cur_epoch_id):
for key, val in tb_dict.items():
tb_log.add_scalar(key, val, step)
# record this epoch which has been evaluated
with open(ckpt_record_file, 'a') as f:
print('%s' % cur_epoch_id, file=f)
logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def create_dataloader(logger):
mode = 'TEST' if args.test else 'EVAL'
DATA_PATH = os.path.join('..', 'data')
# create dataloader
test_set = KittiRCNNDataset(root_dir=DATA_PATH, npoints=cfg.RPN.NUM_POINTS, split=cfg.TEST.SPLIT, mode=mode,
random_select=args.random_select,
rcnn_eval_roi_dir=args.rcnn_eval_roi_dir,
rcnn_eval_feature_dir=args.rcnn_eval_feature_dir,
classes=cfg.CLASSES,
logger=logger)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, pin_memory=True,
num_workers=args.workers, collate_fn=test_set.collate_batch)
return test_loader
if __name__ == "__main__":
# merge config and log to file
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.TAG = os.path.splitext(os.path.basename(args.cfg_file))[0]
if args.eval_mode == 'rpn':
cfg.RPN.ENABLED = True
cfg.RCNN.ENABLED = False
root_result_dir = os.path.join('../', 'output', 'rpn', cfg.TAG)
ckpt_dir = os.path.join('../', 'output', 'rpn', cfg.TAG, 'ckpt')
elif args.eval_mode == 'rcnn':
cfg.RCNN.ENABLED = True
cfg.RPN.ENABLED = cfg.RPN.FIXED = True
root_result_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG)
ckpt_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG, 'ckpt')
elif args.eval_mode == 'rcnn_offline':
cfg.RCNN.ENABLED = True
cfg.RPN.ENABLED = False
root_result_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG)
ckpt_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG, 'ckpt')
assert args.rcnn_eval_roi_dir is not None and args.rcnn_eval_feature_dir is not None
else:
raise NotImplementedError
if args.ckpt_dir is not None:
ckpt_dir = args.ckpt_dir
if args.output_dir is not None:
root_result_dir = args.output_dir
os.makedirs(root_result_dir, exist_ok=True)
with torch.no_grad():
if args.eval_all:
assert os.path.exists(ckpt_dir), '%s' % ckpt_dir
repeat_eval_ckpt(root_result_dir, ckpt_dir)
else:
eval_single_ckpt(root_result_dir)
| 45.372193 | 125 | 0.621314 |
557f90b0870602c2c34c121033cbf688ebe233bb | 477 | py | Python | font/param.py | Programish/ascii | 6f946fead975d93908b82744a402fd8918643742 | [
"MIT"
] | null | null | null | font/param.py | Programish/ascii | 6f946fead975d93908b82744a402fd8918643742 | [
"MIT"
] | null | null | null | font/param.py | Programish/ascii | 6f946fead975d93908b82744a402fd8918643742 | [
"MIT"
] | null | null | null | # charsets
PRINTABLE_ASCII = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;?@[\\]^_`{|}~ '
# ...
# fonts
DEFAULT_FONT = 'DejaVuSansMono.ttf'
# DEFAULT_SIZE = 18
# DEFAULT_DIM = (15, 23)
# DEFAULT_OFFSET = (2, 0)
DEFAULT_SIZE = 15
DEFAULT_DIM = (18, 9)
DEFAULT_OFFSET = (0, 0)
# transform
# DEFAULT_TRANSPOSE_SCALE = [0.5, 0.25]
# DEFAULT_AFFINE_SCALE = 0.1
DEFAULT_TRANSPOSE_SCALE = [0.5, 0.25]
DEFAULT_AFFINE_SCALE = 0.1
| 25.105263 | 115 | 0.666667 |
aa6e1eaea543b8c828071659c19f1506c7a7c0a7 | 3,684 | py | Python | prepare.py | foolishflyfox/Person_reID_baseline_pytorch | 08fc955071feff75bc7101eb6db1eb856e5d1e88 | [
"MIT"
] | null | null | null | prepare.py | foolishflyfox/Person_reID_baseline_pytorch | 08fc955071feff75bc7101eb6db1eb856e5d1e88 | [
"MIT"
] | null | null | null | prepare.py | foolishflyfox/Person_reID_baseline_pytorch | 08fc955071feff75bc7101eb6db1eb856e5d1e88 | [
"MIT"
] | null | null | null | import os
from shutil import copyfile
# You only need to change this line to your dataset download path
download_path = '/home/zzheng/Downloads/Market'
if not os.path.isdir(download_path):
print('please change the download_path')
save_path = download_path + '/pytorch'
if not os.path.isdir(save_path):
os.mkdir(save_path)
#-----------------------------------------
#query
query_path = download_path + '/query'
query_save_path = download_path + '/pytorch/query'
if not os.path.isdir(query_save_path):
os.mkdir(query_save_path)
for root, dirs, files in os.walk(query_path, topdown=True):
for name in files:
if not name[-3:]=='jpg':
continue
ID = name.split('_')
src_path = query_path + '/' + name
dst_path = query_save_path + '/' + ID[0]
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
copyfile(src_path, dst_path + '/' + name)
#-----------------------------------------
#multi-query
query_path = download_path + '/gt_bbox'
# for dukemtmc-reid, we do not need multi-query
if os.path.isdir(query_path):
query_save_path = download_path + '/pytorch/multi-query'
if not os.path.isdir(query_save_path):
os.mkdir(query_save_path)
for root, dirs, files in os.walk(query_path, topdown=True):
for name in files:
if not name[-3:]=='jpg':
continue
ID = name.split('_')
src_path = query_path + '/' + name
dst_path = query_save_path + '/' + ID[0]
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
copyfile(src_path, dst_path + '/' + name)
#-----------------------------------------
#gallery
gallery_path = download_path + '/bounding_box_test'
gallery_save_path = download_path + '/pytorch/gallery'
if not os.path.isdir(gallery_save_path):
os.mkdir(gallery_save_path)
for root, dirs, files in os.walk(gallery_path, topdown=True):
for name in files:
if not name[-3:]=='jpg':
continue
ID = name.split('_')
src_path = gallery_path + '/' + name
dst_path = gallery_save_path + '/' + ID[0]
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
copyfile(src_path, dst_path + '/' + name)
#---------------------------------------
#train_all
train_path = download_path + '/bounding_box_train'
train_save_path = download_path + '/pytorch/train_all'
if not os.path.isdir(train_save_path):
os.mkdir(train_save_path)
for root, dirs, files in os.walk(train_path, topdown=True):
for name in files:
if not name[-3:]=='jpg':
continue
ID = name.split('_')
src_path = train_path + '/' + name
dst_path = train_save_path + '/' + ID[0]
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
copyfile(src_path, dst_path + '/' + name)
#---------------------------------------
#train_val
train_path = download_path + '/bounding_box_train'
train_save_path = download_path + '/pytorch/train'
val_save_path = download_path + '/pytorch/val'
if not os.path.isdir(train_save_path):
os.mkdir(train_save_path)
os.mkdir(val_save_path)
for root, dirs, files in os.walk(train_path, topdown=True):
for name in files:
if not name[-3:]=='jpg':
continue
ID = name.split('_')
src_path = train_path + '/' + name
dst_path = train_save_path + '/' + ID[0]
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
dst_path = val_save_path + '/' + ID[0] #first image is used as val image
os.mkdir(dst_path)
copyfile(src_path, dst_path + '/' + name)
| 33.798165 | 85 | 0.591477 |
ac0f04b42265675964a1051e0298dee0ef3e2679 | 53,360 | py | Python | openfold/utils/loss.py | ychnh/openfold | e61a00d063c0f2d939d24963929cea2b413d3e8e | [
"Apache-2.0"
] | 1 | 2022-02-11T15:43:36.000Z | 2022-02-11T15:43:36.000Z | openfold/utils/loss.py | ychnh/openfold | e61a00d063c0f2d939d24963929cea2b413d3e8e | [
"Apache-2.0"
] | null | null | null | openfold/utils/loss.py | ychnh/openfold | e61a00d063c0f2d939d24963929cea2b413d3e8e | [
"Apache-2.0"
] | 1 | 2022-02-22T05:53:03.000Z | 2022-02-22T05:53:03.000Z | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import logging
import ml_collections
import numpy as np
import torch
import torch.nn as nn
from torch.distributions.bernoulli import Bernoulli
from typing import Dict, Optional, Tuple
from openfold.np import residue_constants
from openfold.utils import feats
from openfold.utils.rigid_utils import Rotation, Rigid
from openfold.utils.tensor_utils import (
tree_map,
tensor_tree_map,
masked_mean,
permute_final_dims,
batched_gather,
)
def softmax_cross_entropy(logits, labels):
loss = -1 * torch.sum(
labels * torch.nn.functional.log_softmax(logits, dim=-1),
dim=-1,
)
return loss
def sigmoid_cross_entropy(logits, labels):
log_p = torch.log(torch.sigmoid(logits))
log_not_p = torch.log(torch.sigmoid(-logits))
loss = -labels * log_p - (1 - labels) * log_not_p
return loss
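# Hedged illustration (assumption, not part of the original module): for hard, one-hot
# labels the categorical cross-entropy above agrees with torch.nn.functional.cross_entropy.
# The helper name below is hypothetical.
def _softmax_cross_entropy_example():
    logits = torch.randn(4, 23)
    target = torch.randint(0, 23, (4,))
    one_hot = torch.nn.functional.one_hot(target, num_classes=23).type(logits.dtype)
    per_sample = softmax_cross_entropy(logits, one_hot)  # [4]
    reference = torch.nn.functional.cross_entropy(logits, target)
    return torch.allclose(per_sample.mean(), reference, atol=1e-6)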
def torsion_angle_loss(
a, # [*, N, 7, 2]
a_gt, # [*, N, 7, 2]
a_alt_gt, # [*, N, 7, 2]
):
# [*, N, 7]
norm = torch.norm(a, dim=-1)
# [*, N, 7, 2]
a = a / norm.unsqueeze(-1)
# [*, N, 7]
diff_norm_gt = torch.norm(a - a_gt, dim=-1)
diff_norm_alt_gt = torch.norm(a - a_alt_gt, dim=-1)
min_diff = torch.minimum(diff_norm_gt ** 2, diff_norm_alt_gt ** 2)
# [*]
l_torsion = torch.mean(min_diff, dim=(-1, -2))
l_angle_norm = torch.mean(torch.abs(norm - 1), dim=(-1, -2))
an_weight = 0.02
return l_torsion + an_weight * l_angle_norm
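# Hedged sanity check (assumed example, not in the original file): when the predicted
# sin/cos pairs are already unit-length and equal to the ground truth, both the torsion
# term and the angle-norm term above vanish.
def _torsion_angle_loss_example():
    angles = torch.nn.functional.normalize(torch.randn(1, 10, 7, 2), dim=-1)
    return torsion_angle_loss(angles, angles, angles)  # [1], ~0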
def compute_fape(
pred_frames: Rigid,
target_frames: Rigid,
frames_mask: torch.Tensor,
pred_positions: torch.Tensor,
target_positions: torch.Tensor,
positions_mask: torch.Tensor,
length_scale: float,
l1_clamp_distance: Optional[float] = None,
eps=1e-8,
) -> torch.Tensor:
"""
Computes FAPE loss.
Args:
pred_frames:
[*, N_frames] Rigid object of predicted frames
target_frames:
[*, N_frames] Rigid object of ground truth frames
frames_mask:
[*, N_frames] binary mask for the frames
pred_positions:
[*, N_pts, 3] predicted atom positions
target_positions:
[*, N_pts, 3] ground truth positions
positions_mask:
[*, N_pts] positions mask
length_scale:
Length scale by which the loss is divided
l1_clamp_distance:
Cutoff above which distance errors are disregarded
eps:
Small value used to regularize denominators
Returns:
[*] loss tensor
"""
# [*, N_frames, N_pts, 3]
local_pred_pos = pred_frames.invert()[..., None].apply(
pred_positions[..., None, :, :],
)
local_target_pos = target_frames.invert()[..., None].apply(
target_positions[..., None, :, :],
)
error_dist = torch.sqrt(
torch.sum((local_pred_pos - local_target_pos) ** 2, dim=-1) + eps
)
if l1_clamp_distance is not None:
error_dist = torch.clamp(error_dist, min=0, max=l1_clamp_distance)
normed_error = error_dist / length_scale
normed_error = normed_error * frames_mask[..., None]
normed_error = normed_error * positions_mask[..., None, :]
# FP16-friendly averaging. Roughly equivalent to:
#
# norm_factor = (
# torch.sum(frames_mask, dim=-1) *
# torch.sum(positions_mask, dim=-1)
# )
# normed_error = torch.sum(normed_error, dim=(-1, -2)) / (eps + norm_factor)
#
# ("roughly" because eps is necessarily duplicated in the latter)
normed_error = torch.sum(normed_error, dim=-1)
normed_error = (
normed_error / (eps + torch.sum(frames_mask, dim=-1))[..., None]
)
normed_error = torch.sum(normed_error, dim=-1)
normed_error = normed_error / (eps + torch.sum(positions_mask, dim=-1))
return normed_error
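# Hedged usage sketch (assumption, not part of the original module): with identity frames
# and identical predicted/target positions the FAPE above is numerically ~0. Shapes follow
# the docstring: [*, N_frames] frames and [*, N_pts, 3] points.
def _compute_fape_example():
    frames = Rigid.from_tensor_4x4(torch.eye(4).repeat(1, 2, 1, 1))  # [1, 2] identity frames
    points = torch.randn(1, 5, 3)
    return compute_fape(
        frames,
        frames,
        torch.ones(1, 2),
        points,
        points,
        torch.ones(1, 5),
        length_scale=10.0,
    )  # [1], ~0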
def backbone_loss(
backbone_rigid_tensor: torch.Tensor,
backbone_rigid_mask: torch.Tensor,
traj: torch.Tensor,
use_clamped_fape: Optional[torch.Tensor] = None,
clamp_distance: float = 10.0,
loss_unit_distance: float = 10.0,
eps: float = 1e-4,
**kwargs,
) -> torch.Tensor:
pred_aff = Rigid.from_tensor_7(traj)
pred_aff = Rigid(
Rotation(rot_mats=pred_aff.get_rots().get_rot_mats(), quats=None),
pred_aff.get_trans(),
)
# DISCREPANCY: DeepMind somehow gets a hold of a tensor_7 version of
# backbone tensor, normalizes it, and then turns it back to a rotation
# matrix. To avoid a potentially numerically unstable rotation matrix
# to quaternion conversion, we just use the original rotation matrix
# outright. This one hasn't been composed a bunch of times, though, so
# it might be fine.
gt_aff = Rigid.from_tensor_4x4(backbone_rigid_tensor)
fape_loss = compute_fape(
pred_aff,
gt_aff[None],
backbone_rigid_mask[None],
pred_aff.get_trans(),
gt_aff[None].get_trans(),
backbone_rigid_mask[None],
l1_clamp_distance=clamp_distance,
length_scale=loss_unit_distance,
eps=eps,
)
if use_clamped_fape is not None:
unclamped_fape_loss = compute_fape(
pred_aff,
gt_aff[None],
backbone_rigid_mask[None],
pred_aff.get_trans(),
gt_aff[None].get_trans(),
backbone_rigid_mask[None],
l1_clamp_distance=None,
length_scale=loss_unit_distance,
eps=eps,
)
fape_loss = fape_loss * use_clamped_fape + unclamped_fape_loss * (
1 - use_clamped_fape
)
# Average over the batch dimension
fape_loss = torch.mean(fape_loss)
return fape_loss
def sidechain_loss(
sidechain_frames: torch.Tensor,
sidechain_atom_pos: torch.Tensor,
rigidgroups_gt_frames: torch.Tensor,
rigidgroups_alt_gt_frames: torch.Tensor,
rigidgroups_gt_exists: torch.Tensor,
renamed_atom14_gt_positions: torch.Tensor,
renamed_atom14_gt_exists: torch.Tensor,
alt_naming_is_better: torch.Tensor,
clamp_distance: float = 10.0,
length_scale: float = 10.0,
eps: float = 1e-4,
**kwargs,
) -> torch.Tensor:
renamed_gt_frames = (
1.0 - alt_naming_is_better[..., None, None, None]
) * rigidgroups_gt_frames + alt_naming_is_better[
..., None, None, None
] * rigidgroups_alt_gt_frames
# Steamroll the inputs
sidechain_frames = sidechain_frames[-1]
batch_dims = sidechain_frames.shape[:-4]
sidechain_frames = sidechain_frames.view(*batch_dims, -1, 4, 4)
sidechain_frames = Rigid.from_tensor_4x4(sidechain_frames)
renamed_gt_frames = renamed_gt_frames.view(*batch_dims, -1, 4, 4)
renamed_gt_frames = Rigid.from_tensor_4x4(renamed_gt_frames)
rigidgroups_gt_exists = rigidgroups_gt_exists.reshape(*batch_dims, -1)
sidechain_atom_pos = sidechain_atom_pos[-1]
sidechain_atom_pos = sidechain_atom_pos.view(*batch_dims, -1, 3)
renamed_atom14_gt_positions = renamed_atom14_gt_positions.view(
*batch_dims, -1, 3
)
renamed_atom14_gt_exists = renamed_atom14_gt_exists.view(*batch_dims, -1)
fape = compute_fape(
sidechain_frames,
renamed_gt_frames,
rigidgroups_gt_exists,
sidechain_atom_pos,
renamed_atom14_gt_positions,
renamed_atom14_gt_exists,
l1_clamp_distance=clamp_distance,
length_scale=length_scale,
eps=eps,
)
return fape
def fape_loss(
out: Dict[str, torch.Tensor],
batch: Dict[str, torch.Tensor],
config: ml_collections.ConfigDict,
) -> torch.Tensor:
bb_loss = backbone_loss(
traj=out["sm"]["frames"],
**{**batch, **config.backbone},
)
sc_loss = sidechain_loss(
out["sm"]["sidechain_frames"],
out["sm"]["positions"],
**{**batch, **config.sidechain},
)
loss = config.backbone.weight * bb_loss + config.sidechain.weight * sc_loss
# Average over the batch dimension
loss = torch.mean(loss)
return loss
def supervised_chi_loss(
angles_sin_cos: torch.Tensor,
unnormalized_angles_sin_cos: torch.Tensor,
aatype: torch.Tensor,
seq_mask: torch.Tensor,
chi_mask: torch.Tensor,
chi_angles_sin_cos: torch.Tensor,
chi_weight: float,
angle_norm_weight: float,
eps=1e-6,
**kwargs,
) -> torch.Tensor:
"""
Implements Algorithm 27 (torsionAngleLoss)
Args:
angles_sin_cos:
[*, N, 7, 2] predicted angles
unnormalized_angles_sin_cos:
The same angles, but unnormalized
aatype:
[*, N] residue indices
seq_mask:
[*, N] sequence mask
chi_mask:
[*, N, 7] angle mask
chi_angles_sin_cos:
[*, N, 7, 2] ground truth angles
chi_weight:
Weight for the angle component of the loss
angle_norm_weight:
Weight for the normalization component of the loss
Returns:
[*] loss tensor
"""
pred_angles = angles_sin_cos[..., 3:, :]
residue_type_one_hot = torch.nn.functional.one_hot(
aatype,
residue_constants.restype_num + 1,
)
chi_pi_periodic = torch.einsum(
"...ij,jk->ik",
residue_type_one_hot.type(angles_sin_cos.dtype),
angles_sin_cos.new_tensor(residue_constants.chi_pi_periodic),
)
true_chi = chi_angles_sin_cos[None]
shifted_mask = (1 - 2 * chi_pi_periodic).unsqueeze(-1)
true_chi_shifted = shifted_mask * true_chi
sq_chi_error = torch.sum((true_chi - pred_angles) ** 2, dim=-1)
sq_chi_error_shifted = torch.sum(
(true_chi_shifted - pred_angles) ** 2, dim=-1
)
sq_chi_error = torch.minimum(sq_chi_error, sq_chi_error_shifted)
# The ol' switcheroo
sq_chi_error = sq_chi_error.permute(
*range(len(sq_chi_error.shape))[1:-2], 0, -2, -1
)
sq_chi_loss = masked_mean(
chi_mask[..., None, :, :], sq_chi_error, dim=(-1, -2, -3)
)
loss = chi_weight * sq_chi_loss
angle_norm = torch.sqrt(
torch.sum(unnormalized_angles_sin_cos ** 2, dim=-1) + eps
)
norm_error = torch.abs(angle_norm - 1.0)
norm_error = norm_error.permute(
*range(len(norm_error.shape))[1:-2], 0, -2, -1
)
angle_norm_loss = masked_mean(
seq_mask[..., None, :, None], norm_error, dim=(-1, -2, -3)
)
loss = loss + angle_norm_weight * angle_norm_loss
# Average over the batch dimension
loss = torch.mean(loss)
return loss
def compute_plddt(logits: torch.Tensor) -> torch.Tensor:
num_bins = logits.shape[-1]
bin_width = 1.0 / num_bins
bounds = torch.arange(
start=0.5 * bin_width, end=1.0, step=bin_width, device=logits.device
)
probs = torch.nn.functional.softmax(logits, dim=-1)
pred_lddt_ca = torch.sum(
probs * bounds.view(*((1,) * len(probs.shape[:-1])), *bounds.shape),
dim=-1,
)
return pred_lddt_ca * 100
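# Hedged usage sketch (assumed, for illustration only): the number of bins is taken from
# the last dimension of the logits, and the result is a per-residue pLDDT in (0, 100).
def _compute_plddt_example():
    lddt_logits = torch.randn(1, 16, 50)  # [*, N_res, no_bins]
    return compute_plddt(lddt_logits)     # [1, 16]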
def lddt(
all_atom_pred_pos: torch.Tensor,
all_atom_positions: torch.Tensor,
all_atom_mask: torch.Tensor,
cutoff: float = 15.0,
eps: float = 1e-10,
per_residue: bool = True,
) -> torch.Tensor:
n = all_atom_mask.shape[-2]
dmat_true = torch.sqrt(
eps
+ torch.sum(
(
all_atom_positions[..., None, :]
- all_atom_positions[..., None, :, :]
)
** 2,
dim=-1,
)
)
dmat_pred = torch.sqrt(
eps
+ torch.sum(
(
all_atom_pred_pos[..., None, :]
- all_atom_pred_pos[..., None, :, :]
)
** 2,
dim=-1,
)
)
dists_to_score = (
(dmat_true < cutoff)
* all_atom_mask
* permute_final_dims(all_atom_mask, (1, 0))
* (1.0 - torch.eye(n, device=all_atom_mask.device))
)
dist_l1 = torch.abs(dmat_true - dmat_pred)
score = (
(dist_l1 < 0.5).type(dist_l1.dtype)
+ (dist_l1 < 1.0).type(dist_l1.dtype)
+ (dist_l1 < 2.0).type(dist_l1.dtype)
+ (dist_l1 < 4.0).type(dist_l1.dtype)
)
score = score * 0.25
dims = (-1,) if per_residue else (-2, -1)
norm = 1.0 / (eps + torch.sum(dists_to_score, dim=dims))
score = norm * (eps + torch.sum(dists_to_score * score, dim=dims))
return score
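# Hedged sanity check (assumption, not part of the original module): scoring a structure
# against itself gives a per-residue lDDT of approximately 1.0.
def _lddt_example():
    ca_positions = torch.randn(1, 16, 3)
    ca_mask = torch.ones(1, 16, 1)
    return lddt(ca_positions, ca_positions, ca_mask)  # [1, 16], ~1.0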
def lddt_ca(
all_atom_pred_pos: torch.Tensor,
all_atom_positions: torch.Tensor,
all_atom_mask: torch.Tensor,
cutoff: float = 15.0,
eps: float = 1e-10,
per_residue: bool = True,
) -> torch.Tensor:
ca_pos = residue_constants.atom_order["CA"]
all_atom_pred_pos = all_atom_pred_pos[..., ca_pos, :]
all_atom_positions = all_atom_positions[..., ca_pos, :]
all_atom_mask = all_atom_mask[..., ca_pos : (ca_pos + 1)] # keep dim
return lddt(
all_atom_pred_pos,
all_atom_positions,
all_atom_mask,
cutoff=cutoff,
eps=eps,
per_residue=per_residue,
)
def lddt_loss(
logits: torch.Tensor,
all_atom_pred_pos: torch.Tensor,
all_atom_positions: torch.Tensor,
all_atom_mask: torch.Tensor,
resolution: torch.Tensor,
cutoff: float = 15.0,
no_bins: int = 50,
min_resolution: float = 0.1,
max_resolution: float = 3.0,
eps: float = 1e-10,
**kwargs,
) -> torch.Tensor:
n = all_atom_mask.shape[-2]
ca_pos = residue_constants.atom_order["CA"]
all_atom_pred_pos = all_atom_pred_pos[..., ca_pos, :]
all_atom_positions = all_atom_positions[..., ca_pos, :]
all_atom_mask = all_atom_mask[..., ca_pos : (ca_pos + 1)] # keep dim
score = lddt(
all_atom_pred_pos,
all_atom_positions,
all_atom_mask,
cutoff=cutoff,
eps=eps
)
score = score.detach()
bin_index = torch.floor(score * no_bins).long()
bin_index = torch.clamp(bin_index, max=(no_bins - 1))
lddt_ca_one_hot = torch.nn.functional.one_hot(
bin_index, num_classes=no_bins
)
errors = softmax_cross_entropy(logits, lddt_ca_one_hot)
all_atom_mask = all_atom_mask.squeeze(-1)
loss = torch.sum(errors * all_atom_mask, dim=-1) / (
eps + torch.sum(all_atom_mask, dim=-1)
)
loss = loss * (
(resolution >= min_resolution) & (resolution <= max_resolution)
)
# Average over the batch dimension
loss = torch.mean(loss)
return loss
def distogram_loss(
logits,
pseudo_beta,
pseudo_beta_mask,
min_bin=2.3125,
max_bin=21.6875,
no_bins=64,
eps=1e-6,
**kwargs,
):
boundaries = torch.linspace(
min_bin,
max_bin,
no_bins - 1,
device=logits.device,
)
boundaries = boundaries ** 2
dists = torch.sum(
(pseudo_beta[..., None, :] - pseudo_beta[..., None, :, :]) ** 2,
dim=-1,
keepdims=True,
)
true_bins = torch.sum(dists > boundaries, dim=-1)
errors = softmax_cross_entropy(
logits,
torch.nn.functional.one_hot(true_bins, no_bins),
)
square_mask = pseudo_beta_mask[..., None] * pseudo_beta_mask[..., None, :]
# FP16-friendly sum. Equivalent to:
# mean = (torch.sum(errors * square_mask, dim=(-1, -2)) /
# (eps + torch.sum(square_mask, dim=(-1, -2))))
denom = eps + torch.sum(square_mask, dim=(-1, -2))
mean = errors * square_mask
mean = torch.sum(mean, dim=-1)
mean = mean / denom[..., None]
mean = torch.sum(mean, dim=-1)
# Average over the batch dimensions
mean = torch.mean(mean)
return mean
def _calculate_bin_centers(boundaries: torch.Tensor):
step = boundaries[1] - boundaries[0]
bin_centers = boundaries + step / 2
bin_centers = torch.cat(
[bin_centers, (bin_centers[-1] + step).unsqueeze(-1)], dim=0
)
return bin_centers
def _calculate_expected_aligned_error(
alignment_confidence_breaks: torch.Tensor,
aligned_distance_error_probs: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
bin_centers = _calculate_bin_centers(alignment_confidence_breaks)
return (
torch.sum(aligned_distance_error_probs * bin_centers, dim=-1),
bin_centers[-1],
)
def compute_predicted_aligned_error(
logits: torch.Tensor,
max_bin: int = 31,
no_bins: int = 64,
**kwargs,
) -> Dict[str, torch.Tensor]:
"""Computes aligned confidence metrics from logits.
Args:
logits: [*, num_res, num_res, num_bins] the logits output from
PredictedAlignedErrorHead.
max_bin: Maximum bin value
no_bins: Number of bins
Returns:
aligned_confidence_probs: [*, num_res, num_res, num_bins] the predicted
aligned error probabilities over bins for each residue pair.
predicted_aligned_error: [*, num_res, num_res] the expected aligned distance
error for each pair of residues.
max_predicted_aligned_error: [*] the maximum predicted error possible.
"""
boundaries = torch.linspace(
0, max_bin, steps=(no_bins - 1), device=logits.device
)
aligned_confidence_probs = torch.nn.functional.softmax(logits, dim=-1)
(
predicted_aligned_error,
max_predicted_aligned_error,
) = _calculate_expected_aligned_error(
alignment_confidence_breaks=boundaries,
aligned_distance_error_probs=aligned_confidence_probs,
)
return {
"aligned_confidence_probs": aligned_confidence_probs,
"predicted_aligned_error": predicted_aligned_error,
"max_predicted_aligned_error": max_predicted_aligned_error,
}
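# Hedged usage sketch (assumed, for illustration only): PAE head logits are turned into a
# per-residue-pair expected aligned error (in Angstroms, up to max_bin).
def _compute_predicted_aligned_error_example():
    pae_logits = torch.randn(1, 32, 32, 64)
    out = compute_predicted_aligned_error(pae_logits, max_bin=31, no_bins=64)
    return out["predicted_aligned_error"]  # [1, 32, 32]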
def compute_tm(
logits: torch.Tensor,
residue_weights: Optional[torch.Tensor] = None,
max_bin: int = 31,
no_bins: int = 64,
eps: float = 1e-8,
**kwargs,
) -> torch.Tensor:
if residue_weights is None:
residue_weights = logits.new_ones(logits.shape[-2])
boundaries = torch.linspace(
0, max_bin, steps=(no_bins - 1), device=logits.device
)
bin_centers = _calculate_bin_centers(boundaries)
torch.sum(residue_weights)
n = logits.shape[-2]
clipped_n = max(n, 19)
d0 = 1.24 * (clipped_n - 15) ** (1.0 / 3) - 1.8
probs = torch.nn.functional.softmax(logits, dim=-1)
tm_per_bin = 1.0 / (1 + (bin_centers ** 2) / (d0 ** 2))
predicted_tm_term = torch.sum(probs * tm_per_bin, dim=-1)
normed_residue_mask = residue_weights / (eps + residue_weights.sum())
per_alignment = torch.sum(predicted_tm_term * normed_residue_mask, dim=-1)
weighted = per_alignment * residue_weights
argmax = (weighted == torch.max(weighted)).nonzero()[0]
return per_alignment[tuple(argmax)]
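# Hedged usage sketch (assumed, for illustration only): the same pairwise logits yield a
# single predicted TM-score in (0, 1).
def _compute_tm_example():
    pae_logits = torch.randn(32, 32, 64)
    return compute_tm(pae_logits, max_bin=31, no_bins=64)  # scalar tensor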
def tm_loss(
logits,
final_affine_tensor,
backbone_rigid_tensor,
backbone_rigid_mask,
resolution,
max_bin=31,
no_bins=64,
min_resolution: float = 0.1,
max_resolution: float = 3.0,
eps=1e-8,
**kwargs,
):
pred_affine = Rigid.from_tensor_7(final_affine_tensor)
backbone_rigid = Rigid.from_tensor_4x4(backbone_rigid_tensor)
def _points(affine):
pts = affine.get_trans()[..., None, :, :]
return affine.invert()[..., None].apply(pts)
sq_diff = torch.sum(
(_points(pred_affine) - _points(backbone_rigid)) ** 2, dim=-1
)
sq_diff = sq_diff.detach()
boundaries = torch.linspace(
0, max_bin, steps=(no_bins - 1), device=logits.device
)
boundaries = boundaries ** 2
true_bins = torch.sum(sq_diff[..., None] > boundaries, dim=-1)
errors = softmax_cross_entropy(
logits, torch.nn.functional.one_hot(true_bins, no_bins)
)
square_mask = (
backbone_rigid_mask[..., None] * backbone_rigid_mask[..., None, :]
)
loss = torch.sum(errors * square_mask, dim=-1)
scale = 0.5 # hack to help FP16 training along
denom = eps + torch.sum(scale * square_mask, dim=(-1, -2))
loss = loss / denom[..., None]
loss = torch.sum(loss, dim=-1)
loss = loss * scale
loss = loss * (
(resolution >= min_resolution) & (resolution <= max_resolution)
)
# Average over the loss dimension
loss = torch.mean(loss)
return loss
def between_residue_bond_loss(
pred_atom_positions: torch.Tensor, # (*, N, 37/14, 3)
pred_atom_mask: torch.Tensor, # (*, N, 37/14)
residue_index: torch.Tensor, # (*, N)
aatype: torch.Tensor, # (*, N)
tolerance_factor_soft=12.0,
tolerance_factor_hard=12.0,
eps=1e-6,
) -> Dict[str, torch.Tensor]:
"""Flat-bottom loss to penalize structural violations between residues.
This is a loss penalizing any violation of the geometry around the peptide
bond between consecutive amino acids. This loss corresponds to
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 44, 45.
Args:
pred_atom_positions: Atom positions in atom37/14 representation
pred_atom_mask: Atom mask in atom37/14 representation
residue_index: Residue index for given amino acid, this is assumed to be
monotonically increasing.
aatype: Amino acid type of given residue
tolerance_factor_soft: soft tolerance factor measured in standard deviations
of pdb distributions
tolerance_factor_hard: hard tolerance factor measured in standard deviations
of pdb distributions
Returns:
Dict containing:
* 'c_n_loss_mean': Loss for peptide bond length violations
* 'ca_c_n_loss_mean': Loss for violations of bond angle around C spanned
by CA, C, N
* 'c_n_ca_loss_mean': Loss for violations of bond angle around N spanned
by C, N, CA
* 'per_residue_loss_sum': sum of all losses for each residue
* 'per_residue_violation_mask': mask denoting all residues with violation
present.
"""
# Get the positions of the relevant backbone atoms.
this_ca_pos = pred_atom_positions[..., :-1, 1, :]
this_ca_mask = pred_atom_mask[..., :-1, 1]
this_c_pos = pred_atom_positions[..., :-1, 2, :]
this_c_mask = pred_atom_mask[..., :-1, 2]
next_n_pos = pred_atom_positions[..., 1:, 0, :]
next_n_mask = pred_atom_mask[..., 1:, 0]
next_ca_pos = pred_atom_positions[..., 1:, 1, :]
next_ca_mask = pred_atom_mask[..., 1:, 1]
has_no_gap_mask = (residue_index[..., 1:] - residue_index[..., :-1]) == 1.0
# Compute loss for the C--N bond.
c_n_bond_length = torch.sqrt(
eps + torch.sum((this_c_pos - next_n_pos) ** 2, dim=-1)
)
# The C-N bond to proline has slightly different length because of the ring.
next_is_proline = aatype[..., 1:] == residue_constants.resname_to_idx["PRO"]
gt_length = (
~next_is_proline
) * residue_constants.between_res_bond_length_c_n[
0
] + next_is_proline * residue_constants.between_res_bond_length_c_n[
1
]
gt_stddev = (
~next_is_proline
) * residue_constants.between_res_bond_length_stddev_c_n[
0
] + next_is_proline * residue_constants.between_res_bond_length_stddev_c_n[
1
]
c_n_bond_length_error = torch.sqrt(eps + (c_n_bond_length - gt_length) ** 2)
c_n_loss_per_residue = torch.nn.functional.relu(
c_n_bond_length_error - tolerance_factor_soft * gt_stddev
)
mask = this_c_mask * next_n_mask * has_no_gap_mask
c_n_loss = torch.sum(mask * c_n_loss_per_residue, dim=-1) / (
torch.sum(mask, dim=-1) + eps
)
c_n_violation_mask = mask * (
c_n_bond_length_error > (tolerance_factor_hard * gt_stddev)
)
# Compute loss for the angles.
ca_c_bond_length = torch.sqrt(
eps + torch.sum((this_ca_pos - this_c_pos) ** 2, dim=-1)
)
n_ca_bond_length = torch.sqrt(
eps + torch.sum((next_n_pos - next_ca_pos) ** 2, dim=-1)
)
c_ca_unit_vec = (this_ca_pos - this_c_pos) / ca_c_bond_length[..., None]
c_n_unit_vec = (next_n_pos - this_c_pos) / c_n_bond_length[..., None]
n_ca_unit_vec = (next_ca_pos - next_n_pos) / n_ca_bond_length[..., None]
ca_c_n_cos_angle = torch.sum(c_ca_unit_vec * c_n_unit_vec, dim=-1)
gt_angle = residue_constants.between_res_cos_angles_ca_c_n[0]
gt_stddev = residue_constants.between_res_bond_length_stddev_c_n[0]
ca_c_n_cos_angle_error = torch.sqrt(
eps + (ca_c_n_cos_angle - gt_angle) ** 2
)
ca_c_n_loss_per_residue = torch.nn.functional.relu(
ca_c_n_cos_angle_error - tolerance_factor_soft * gt_stddev
)
mask = this_ca_mask * this_c_mask * next_n_mask * has_no_gap_mask
ca_c_n_loss = torch.sum(mask * ca_c_n_loss_per_residue, dim=-1) / (
torch.sum(mask, dim=-1) + eps
)
ca_c_n_violation_mask = mask * (
ca_c_n_cos_angle_error > (tolerance_factor_hard * gt_stddev)
)
c_n_ca_cos_angle = torch.sum((-c_n_unit_vec) * n_ca_unit_vec, dim=-1)
gt_angle = residue_constants.between_res_cos_angles_c_n_ca[0]
gt_stddev = residue_constants.between_res_cos_angles_c_n_ca[1]
c_n_ca_cos_angle_error = torch.sqrt(
eps + torch.square(c_n_ca_cos_angle - gt_angle)
)
c_n_ca_loss_per_residue = torch.nn.functional.relu(
c_n_ca_cos_angle_error - tolerance_factor_soft * gt_stddev
)
mask = this_c_mask * next_n_mask * next_ca_mask * has_no_gap_mask
c_n_ca_loss = torch.sum(mask * c_n_ca_loss_per_residue, dim=-1) / (
torch.sum(mask, dim=-1) + eps
)
c_n_ca_violation_mask = mask * (
c_n_ca_cos_angle_error > (tolerance_factor_hard * gt_stddev)
)
# Compute a per residue loss (equally distribute the loss to both
# neighbouring residues).
per_residue_loss_sum = (
c_n_loss_per_residue + ca_c_n_loss_per_residue + c_n_ca_loss_per_residue
)
per_residue_loss_sum = 0.5 * (
torch.nn.functional.pad(per_residue_loss_sum, (0, 1))
+ torch.nn.functional.pad(per_residue_loss_sum, (1, 0))
)
# Compute hard violations.
violation_mask = torch.max(
torch.stack(
[c_n_violation_mask, ca_c_n_violation_mask, c_n_ca_violation_mask],
dim=-2,
),
dim=-2,
)[0]
violation_mask = torch.maximum(
torch.nn.functional.pad(violation_mask, (0, 1)),
torch.nn.functional.pad(violation_mask, (1, 0)),
)
return {
"c_n_loss_mean": c_n_loss,
"ca_c_n_loss_mean": ca_c_n_loss,
"c_n_ca_loss_mean": c_n_ca_loss,
"per_residue_loss_sum": per_residue_loss_sum,
"per_residue_violation_mask": violation_mask,
}
def between_residue_clash_loss(
atom14_pred_positions: torch.Tensor,
atom14_atom_exists: torch.Tensor,
atom14_atom_radius: torch.Tensor,
residue_index: torch.Tensor,
overlap_tolerance_soft=1.5,
overlap_tolerance_hard=1.5,
eps=1e-10,
) -> Dict[str, torch.Tensor]:
"""Loss to penalize steric clashes between residues.
This is a loss penalizing any steric clashes due to non bonded atoms in
different peptides coming too close. This loss corresponds to the part with
different residues of
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 46.
Args:
atom14_pred_positions: Predicted positions of atoms in
global prediction frame
atom14_atom_exists: Mask denoting whether atom at positions exists for given
amino acid type
atom14_atom_radius: Van der Waals radius for each atom.
residue_index: Residue index for given amino acid.
overlap_tolerance_soft: Soft tolerance factor.
overlap_tolerance_hard: Hard tolerance factor.
Returns:
Dict containing:
* 'mean_loss': average clash loss
* 'per_atom_loss_sum': sum of all clash losses per atom, shape (N, 14)
* 'per_atom_clash_mask': mask whether atom clashes with any other atom
shape (N, 14)
"""
fp_type = atom14_pred_positions.dtype
# Create the distance matrix.
# (N, N, 14, 14)
dists = torch.sqrt(
eps
+ torch.sum(
(
atom14_pred_positions[..., :, None, :, None, :]
- atom14_pred_positions[..., None, :, None, :, :]
)
** 2,
dim=-1,
)
)
# Create the mask for valid distances.
# shape (N, N, 14, 14)
dists_mask = (
atom14_atom_exists[..., :, None, :, None]
* atom14_atom_exists[..., None, :, None, :]
).type(fp_type)
# Mask out all the duplicate entries in the lower triangular matrix.
# Also mask out the diagonal (atom-pairs from the same residue) -- these atoms
# are handled separately.
dists_mask = dists_mask * (
residue_index[..., :, None, None, None]
< residue_index[..., None, :, None, None]
)
# Backbone C--N bond between subsequent residues is no clash.
c_one_hot = torch.nn.functional.one_hot(
residue_index.new_tensor(2), num_classes=14
)
c_one_hot = c_one_hot.reshape(
*((1,) * len(residue_index.shape[:-1])), *c_one_hot.shape
)
c_one_hot = c_one_hot.type(fp_type)
n_one_hot = torch.nn.functional.one_hot(
residue_index.new_tensor(0), num_classes=14
)
n_one_hot = n_one_hot.reshape(
*((1,) * len(residue_index.shape[:-1])), *n_one_hot.shape
)
n_one_hot = n_one_hot.type(fp_type)
neighbour_mask = (
residue_index[..., :, None, None, None] + 1
) == residue_index[..., None, :, None, None]
c_n_bonds = (
neighbour_mask
* c_one_hot[..., None, None, :, None]
* n_one_hot[..., None, None, None, :]
)
dists_mask = dists_mask * (1.0 - c_n_bonds)
# Disulfide bridge between two cysteines is no clash.
cys = residue_constants.restype_name_to_atom14_names["CYS"]
cys_sg_idx = cys.index("SG")
cys_sg_idx = residue_index.new_tensor(cys_sg_idx)
cys_sg_idx = cys_sg_idx.reshape(
*((1,) * len(residue_index.shape[:-1])), 1
).squeeze(-1)
cys_sg_one_hot = torch.nn.functional.one_hot(cys_sg_idx, num_classes=14)
disulfide_bonds = (
cys_sg_one_hot[..., None, None, :, None]
* cys_sg_one_hot[..., None, None, None, :]
)
dists_mask = dists_mask * (1.0 - disulfide_bonds)
# Compute the lower bound for the allowed distances.
# shape (N, N, 14, 14)
dists_lower_bound = dists_mask * (
atom14_atom_radius[..., :, None, :, None]
+ atom14_atom_radius[..., None, :, None, :]
)
# Compute the error.
# shape (N, N, 14, 14)
dists_to_low_error = dists_mask * torch.nn.functional.relu(
dists_lower_bound - overlap_tolerance_soft - dists
)
# Compute the mean loss.
# shape ()
mean_loss = torch.sum(dists_to_low_error) / (1e-6 + torch.sum(dists_mask))
# Compute the per atom loss sum.
# shape (N, 14)
per_atom_loss_sum = torch.sum(dists_to_low_error, dim=(-4, -2)) + torch.sum(
dists_to_low_error, axis=(-3, -1)
)
# Compute the hard clash mask.
# shape (N, N, 14, 14)
clash_mask = dists_mask * (
dists < (dists_lower_bound - overlap_tolerance_hard)
)
# Compute the per atom clash.
# shape (N, 14)
per_atom_clash_mask = torch.maximum(
torch.amax(clash_mask, axis=(-4, -2)),
torch.amax(clash_mask, axis=(-3, -1)),
)
return {
"mean_loss": mean_loss, # shape ()
"per_atom_loss_sum": per_atom_loss_sum, # shape (N, 14)
"per_atom_clash_mask": per_atom_clash_mask, # shape (N, 14)
}
def within_residue_violations(
atom14_pred_positions: torch.Tensor,
atom14_atom_exists: torch.Tensor,
atom14_dists_lower_bound: torch.Tensor,
atom14_dists_upper_bound: torch.Tensor,
tighten_bounds_for_loss=0.0,
eps=1e-10,
) -> Dict[str, torch.Tensor]:
"""Loss to penalize steric clashes within residues.
This is a loss penalizing any steric violations or clashes of non-bonded atoms
in a given peptide. This loss corresponds to the part with
the same residues of
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 46.
Args:
atom14_pred_positions ([*, N, 14, 3]):
Predicted positions of atoms in global prediction frame.
atom14_atom_exists ([*, N, 14]):
Mask denoting whether atom at positions exists for given
amino acid type
atom14_dists_lower_bound ([*, N, 14]):
Lower bound on allowed distances.
atom14_dists_upper_bound ([*, N, 14]):
Upper bound on allowed distances
tighten_bounds_for_loss ([*, N]):
Extra factor to tighten loss
Returns:
Dict containing:
* 'per_atom_loss_sum' ([*, N, 14]):
sum of all clash losses per atom, shape
* 'per_atom_clash_mask' ([*, N, 14]):
mask whether atom clashes with any other atom shape
"""
# Compute the mask for each residue.
dists_masks = 1.0 - torch.eye(14, device=atom14_atom_exists.device)[None]
dists_masks = dists_masks.reshape(
*((1,) * len(atom14_atom_exists.shape[:-2])), *dists_masks.shape
)
dists_masks = (
atom14_atom_exists[..., :, :, None]
* atom14_atom_exists[..., :, None, :]
* dists_masks
)
# Distance matrix
dists = torch.sqrt(
eps
+ torch.sum(
(
atom14_pred_positions[..., :, :, None, :]
- atom14_pred_positions[..., :, None, :, :]
)
** 2,
dim=-1,
)
)
# Compute the loss.
dists_to_low_error = torch.nn.functional.relu(
atom14_dists_lower_bound + tighten_bounds_for_loss - dists
)
dists_to_high_error = torch.nn.functional.relu(
dists - (atom14_dists_upper_bound - tighten_bounds_for_loss)
)
loss = dists_masks * (dists_to_low_error + dists_to_high_error)
# Compute the per atom loss sum.
per_atom_loss_sum = torch.sum(loss, dim=-2) + torch.sum(loss, dim=-1)
# Compute the violations mask.
violations = dists_masks * (
(dists < atom14_dists_lower_bound) | (dists > atom14_dists_upper_bound)
)
# Compute the per atom violations.
per_atom_violations = torch.maximum(
torch.max(violations, dim=-2)[0], torch.max(violations, axis=-1)[0]
)
return {
"per_atom_loss_sum": per_atom_loss_sum,
"per_atom_violations": per_atom_violations,
}
def find_structural_violations(
batch: Dict[str, torch.Tensor],
atom14_pred_positions: torch.Tensor,
violation_tolerance_factor: float,
clash_overlap_tolerance: float,
**kwargs,
) -> Dict[str, torch.Tensor]:
"""Computes several checks for structural violations."""
# Compute between residue backbone violations of bonds and angles.
connection_violations = between_residue_bond_loss(
pred_atom_positions=atom14_pred_positions,
pred_atom_mask=batch["atom14_atom_exists"],
residue_index=batch["residue_index"],
aatype=batch["aatype"],
tolerance_factor_soft=violation_tolerance_factor,
tolerance_factor_hard=violation_tolerance_factor,
)
# Compute the Van der Waals radius for every atom
# (the first letter of the atom name is the element type).
# Shape: (N, 14).
atomtype_radius = [
residue_constants.van_der_waals_radius[name[0]]
for name in residue_constants.atom_types
]
atomtype_radius = atom14_pred_positions.new_tensor(atomtype_radius)
atom14_atom_radius = (
batch["atom14_atom_exists"]
* atomtype_radius[batch["residx_atom14_to_atom37"]]
)
# Compute the between residue clash loss.
between_residue_clashes = between_residue_clash_loss(
atom14_pred_positions=atom14_pred_positions,
atom14_atom_exists=batch["atom14_atom_exists"],
atom14_atom_radius=atom14_atom_radius,
residue_index=batch["residue_index"],
overlap_tolerance_soft=clash_overlap_tolerance,
overlap_tolerance_hard=clash_overlap_tolerance,
)
# Compute all within-residue violations (clashes,
# bond length and angle violations).
restype_atom14_bounds = residue_constants.make_atom14_dists_bounds(
overlap_tolerance=clash_overlap_tolerance,
bond_length_tolerance_factor=violation_tolerance_factor,
)
atom14_atom_exists = batch["atom14_atom_exists"]
atom14_dists_lower_bound = atom14_pred_positions.new_tensor(
restype_atom14_bounds["lower_bound"]
)[batch["aatype"]]
atom14_dists_upper_bound = atom14_pred_positions.new_tensor(
restype_atom14_bounds["upper_bound"]
)[batch["aatype"]]
residue_violations = within_residue_violations(
atom14_pred_positions=atom14_pred_positions,
atom14_atom_exists=batch["atom14_atom_exists"],
atom14_dists_lower_bound=atom14_dists_lower_bound,
atom14_dists_upper_bound=atom14_dists_upper_bound,
tighten_bounds_for_loss=0.0,
)
# Combine them to a single per-residue violation mask (used later for LDDT).
per_residue_violations_mask = torch.max(
torch.stack(
[
connection_violations["per_residue_violation_mask"],
torch.max(
between_residue_clashes["per_atom_clash_mask"], dim=-1
)[0],
torch.max(residue_violations["per_atom_violations"], dim=-1)[0],
],
dim=-1,
),
dim=-1,
)[0]
return {
"between_residues": {
"bonds_c_n_loss_mean": connection_violations["c_n_loss_mean"], # ()
"angles_ca_c_n_loss_mean": connection_violations[
"ca_c_n_loss_mean"
], # ()
"angles_c_n_ca_loss_mean": connection_violations[
"c_n_ca_loss_mean"
], # ()
"connections_per_residue_loss_sum": connection_violations[
"per_residue_loss_sum"
], # (N)
"connections_per_residue_violation_mask": connection_violations[
"per_residue_violation_mask"
], # (N)
"clashes_mean_loss": between_residue_clashes["mean_loss"], # ()
"clashes_per_atom_loss_sum": between_residue_clashes[
"per_atom_loss_sum"
], # (N, 14)
"clashes_per_atom_clash_mask": between_residue_clashes[
"per_atom_clash_mask"
], # (N, 14)
},
"within_residues": {
"per_atom_loss_sum": residue_violations[
"per_atom_loss_sum"
], # (N, 14)
"per_atom_violations": residue_violations[
"per_atom_violations"
], # (N, 14),
},
"total_per_residue_violations_mask": per_residue_violations_mask, # (N)
}
def find_structural_violations_np(
batch: Dict[str, np.ndarray],
atom14_pred_positions: np.ndarray,
config: ml_collections.ConfigDict,
) -> Dict[str, np.ndarray]:
to_tensor = lambda x: torch.tensor(x)
batch = tree_map(to_tensor, batch, np.ndarray)
atom14_pred_positions = to_tensor(atom14_pred_positions)
out = find_structural_violations(batch, atom14_pred_positions, **config)
to_np = lambda x: np.array(x)
np_out = tensor_tree_map(to_np, out)
return np_out
def extreme_ca_ca_distance_violations(
pred_atom_positions: torch.Tensor, # (N, 37(14), 3)
pred_atom_mask: torch.Tensor, # (N, 37(14))
residue_index: torch.Tensor, # (N)
max_angstrom_tolerance=1.5,
eps=1e-6,
) -> torch.Tensor:
"""Counts residues whose Ca is a large distance from its neighbour.
Measures the fraction of CA-CA pairs between consecutive amino acids that are
more than 'max_angstrom_tolerance' apart.
Args:
pred_atom_positions: Atom positions in atom37/14 representation
pred_atom_mask: Atom mask in atom37/14 representation
residue_index: Residue index for given amino acid, this is assumed to be
monotonically increasing.
max_angstrom_tolerance: Maximum distance allowed to not count as violation.
Returns:
Fraction of consecutive CA-CA pairs with violation.
"""
this_ca_pos = pred_atom_positions[..., :-1, 1, :]
this_ca_mask = pred_atom_mask[..., :-1, 1]
next_ca_pos = pred_atom_positions[..., 1:, 1, :]
next_ca_mask = pred_atom_mask[..., 1:, 1]
has_no_gap_mask = (residue_index[..., 1:] - residue_index[..., :-1]) == 1.0
ca_ca_distance = torch.sqrt(
eps + torch.sum((this_ca_pos - next_ca_pos) ** 2, dim=-1)
)
violations = (
ca_ca_distance - residue_constants.ca_ca
) > max_angstrom_tolerance
mask = this_ca_mask * next_ca_mask * has_no_gap_mask
mean = masked_mean(mask, violations, -1)
return mean
def compute_violation_metrics(
batch: Dict[str, torch.Tensor],
atom14_pred_positions: torch.Tensor, # (N, 14, 3)
violations: Dict[str, torch.Tensor],
) -> Dict[str, torch.Tensor]:
"""Compute several metrics to assess the structural violations."""
ret = {}
extreme_ca_ca_violations = extreme_ca_ca_distance_violations(
pred_atom_positions=atom14_pred_positions,
pred_atom_mask=batch["atom14_atom_exists"],
residue_index=batch["residue_index"],
)
ret["violations_extreme_ca_ca_distance"] = extreme_ca_ca_violations
ret["violations_between_residue_bond"] = masked_mean(
batch["seq_mask"],
violations["between_residues"][
"connections_per_residue_violation_mask"
],
dim=-1,
)
ret["violations_between_residue_clash"] = masked_mean(
mask=batch["seq_mask"],
value=torch.max(
violations["between_residues"]["clashes_per_atom_clash_mask"],
dim=-1,
)[0],
dim=-1,
)
ret["violations_within_residue"] = masked_mean(
mask=batch["seq_mask"],
value=torch.max(
violations["within_residues"]["per_atom_violations"], dim=-1
)[0],
dim=-1,
)
ret["violations_per_residue"] = masked_mean(
mask=batch["seq_mask"],
value=violations["total_per_residue_violations_mask"],
dim=-1,
)
return ret
def compute_violation_metrics_np(
batch: Dict[str, np.ndarray],
atom14_pred_positions: np.ndarray,
violations: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
to_tensor = lambda x: torch.tensor(x)
batch = tree_map(to_tensor, batch, np.ndarray)
atom14_pred_positions = to_tensor(atom14_pred_positions)
violations = tree_map(to_tensor, violations, np.ndarray)
out = compute_violation_metrics(batch, atom14_pred_positions, violations)
to_np = lambda x: np.array(x)
return tree_map(to_np, out, torch.Tensor)
def violation_loss(
violations: Dict[str, torch.Tensor],
atom14_atom_exists: torch.Tensor,
eps=1e-6,
**kwargs,
) -> torch.Tensor:
num_atoms = torch.sum(atom14_atom_exists)
l_clash = torch.sum(
violations["between_residues"]["clashes_per_atom_loss_sum"]
+ violations["within_residues"]["per_atom_loss_sum"]
)
l_clash = l_clash / (eps + num_atoms)
loss = (
violations["between_residues"]["bonds_c_n_loss_mean"]
+ violations["between_residues"]["angles_ca_c_n_loss_mean"]
+ violations["between_residues"]["angles_c_n_ca_loss_mean"]
+ l_clash
)
return loss
def compute_renamed_ground_truth(
batch: Dict[str, torch.Tensor],
atom14_pred_positions: torch.Tensor,
eps=1e-10,
) -> Dict[str, torch.Tensor]:
"""
Find optimal renaming of ground truth based on the predicted positions.
Alg. 26 "renameSymmetricGroundTruthAtoms"
This renamed ground truth is then used for all losses,
such that each loss moves the atoms in the same direction.
Args:
batch: Dictionary containing:
* atom14_gt_positions: Ground truth positions.
* atom14_alt_gt_positions: Ground truth positions with renaming swaps.
* atom14_atom_is_ambiguous: 1.0 for atoms that are affected by
renaming swaps.
* atom14_gt_exists: Mask for which atoms exist in ground truth.
* atom14_alt_gt_exists: Mask for which atoms exist in ground truth
after renaming.
* atom14_atom_exists: Mask for whether each atom is part of the given
amino acid type.
atom14_pred_positions: Array of atom positions in global frame with shape (N, 14, 3).
Returns:
Dictionary containing:
alt_naming_is_better: Array with 1.0 where alternative swap is better.
renamed_atom14_gt_positions: Array of optimal ground truth positions
after renaming swaps are performed.
renamed_atom14_gt_exists: Mask after renaming swap is performed.
"""
pred_dists = torch.sqrt(
eps
+ torch.sum(
(
atom14_pred_positions[..., None, :, None, :]
- atom14_pred_positions[..., None, :, None, :, :]
)
** 2,
dim=-1,
)
)
atom14_gt_positions = batch["atom14_gt_positions"]
gt_dists = torch.sqrt(
eps
+ torch.sum(
(
atom14_gt_positions[..., None, :, None, :]
- atom14_gt_positions[..., None, :, None, :, :]
)
** 2,
dim=-1,
)
)
atom14_alt_gt_positions = batch["atom14_alt_gt_positions"]
alt_gt_dists = torch.sqrt(
eps
+ torch.sum(
(
atom14_alt_gt_positions[..., None, :, None, :]
- atom14_alt_gt_positions[..., None, :, None, :, :]
)
** 2,
dim=-1,
)
)
lddt = torch.sqrt(eps + (pred_dists - gt_dists) ** 2)
alt_lddt = torch.sqrt(eps + (pred_dists - alt_gt_dists) ** 2)
atom14_gt_exists = batch["atom14_gt_exists"]
atom14_atom_is_ambiguous = batch["atom14_atom_is_ambiguous"]
mask = (
atom14_gt_exists[..., None, :, None]
* atom14_atom_is_ambiguous[..., None, :, None]
* atom14_gt_exists[..., None, :, None, :]
* (1.0 - atom14_atom_is_ambiguous[..., None, :, None, :])
)
per_res_lddt = torch.sum(mask * lddt, dim=(-1, -2, -3))
alt_per_res_lddt = torch.sum(mask * alt_lddt, dim=(-1, -2, -3))
fp_type = atom14_pred_positions.dtype
alt_naming_is_better = (alt_per_res_lddt < per_res_lddt).type(fp_type)
renamed_atom14_gt_positions = (
1.0 - alt_naming_is_better[..., None, None]
) * atom14_gt_positions + alt_naming_is_better[
..., None, None
] * atom14_alt_gt_positions
renamed_atom14_gt_mask = (
1.0 - alt_naming_is_better[..., None]
) * atom14_gt_exists + alt_naming_is_better[..., None] * batch[
"atom14_alt_gt_exists"
]
return {
"alt_naming_is_better": alt_naming_is_better,
"renamed_atom14_gt_positions": renamed_atom14_gt_positions,
"renamed_atom14_gt_exists": renamed_atom14_gt_mask,
}
def experimentally_resolved_loss(
logits: torch.Tensor,
atom37_atom_exists: torch.Tensor,
all_atom_mask: torch.Tensor,
resolution: torch.Tensor,
min_resolution: float,
max_resolution: float,
eps: float = 1e-8,
**kwargs,
) -> torch.Tensor:
errors = sigmoid_cross_entropy(logits, all_atom_mask)
loss = torch.sum(errors * atom37_atom_exists, dim=-1)
loss = loss / (eps + torch.sum(atom37_atom_exists, dim=(-1, -2)))
loss = torch.sum(loss, dim=-1)
loss = loss * (
(resolution >= min_resolution) & (resolution <= max_resolution)
)
loss = torch.mean(loss)
return loss
def masked_msa_loss(logits, true_msa, bert_mask, eps=1e-8, **kwargs):
"""
Computes BERT-style masked MSA loss. Implements subsection 1.9.9.
Args:
logits: [*, N_seq, N_res, 23] predicted residue distribution
true_msa: [*, N_seq, N_res] true MSA
bert_mask: [*, N_seq, N_res] MSA mask
Returns:
Masked MSA loss
"""
errors = softmax_cross_entropy(
logits, torch.nn.functional.one_hot(true_msa, num_classes=23)
)
# FP16-friendly averaging. Equivalent to:
# loss = (
# torch.sum(errors * bert_mask, dim=(-1, -2)) /
# (eps + torch.sum(bert_mask, dim=(-1, -2)))
# )
loss = errors * bert_mask
loss = torch.sum(loss, dim=-1)
scale = 0.5
denom = eps + torch.sum(scale * bert_mask, dim=(-1, -2))
loss = loss / denom[..., None]
loss = torch.sum(loss, dim=-1)
loss = loss * scale
loss = torch.mean(loss)
return loss
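# Hedged usage sketch (assumed, for illustration only): logits are scored against the true
# MSA only at the BERT-masked positions.
def _masked_msa_loss_example():
    msa_logits = torch.randn(2, 8, 16, 23)            # [*, N_seq, N_res, 23]
    true_msa = torch.randint(0, 23, (2, 8, 16))
    bert_mask = (torch.rand(2, 8, 16) < 0.15).float()
    return masked_msa_loss(msa_logits, true_msa, bert_mask)  # scalar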
def compute_drmsd(structure_1, structure_2, mask=None):
if(mask is not None):
structure_1 = structure_1 * mask[..., None]
structure_2 = structure_2 * mask[..., None]
d1 = structure_1[..., :, None, :] - structure_1[..., None, :, :]
d2 = structure_2[..., :, None, :] - structure_2[..., None, :, :]
d1 = d1 ** 2
d2 = d2 ** 2
d1 = torch.sqrt(torch.sum(d1, dim=-1))
d2 = torch.sqrt(torch.sum(d2, dim=-1))
drmsd = d1 - d2
drmsd = drmsd ** 2
drmsd = torch.sum(drmsd, dim=(-1, -2))
n = d1.shape[-1] if mask is None else torch.sum(mask, dim=-1)
drmsd = drmsd * (1 / (n * (n - 1)))
drmsd = torch.sqrt(drmsd)
return drmsd
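# Hedged sanity check (assumption, not part of the original module): dRMSD is zero for
# identical structures and positive otherwise.
def _compute_drmsd_example():
    structure = torch.randn(8, 3)
    same = compute_drmsd(structure, structure)               # ~0
    different = compute_drmsd(structure, torch.randn(8, 3))  # > 0
    return same, different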
def compute_drmsd_np(structure_1, structure_2, mask=None):
structure_1 = torch.tensor(structure_1)
structure_2 = torch.tensor(structure_2)
if(mask is not None):
mask = torch.tensor(mask)
return compute_drmsd(structure_1, structure_2, mask)
class AlphaFoldLoss(nn.Module):
"""Aggregation of the various losses described in the supplement"""
def __init__(self, config):
super(AlphaFoldLoss, self).__init__()
self.config = config
def forward(self, out, batch, _return_breakdown=False):
if "violation" not in out.keys():
out["violation"] = find_structural_violations(
batch,
out["sm"]["positions"][-1],
**self.config.violation,
)
if "renamed_atom14_gt_positions" not in out.keys():
batch.update(
compute_renamed_ground_truth(
batch,
out["sm"]["positions"][-1],
)
)
loss_fns = {
"distogram": lambda: distogram_loss(
logits=out["distogram_logits"],
**{**batch, **self.config.distogram},
),
"experimentally_resolved": lambda: experimentally_resolved_loss(
logits=out["experimentally_resolved_logits"],
**{**batch, **self.config.experimentally_resolved},
),
"fape": lambda: fape_loss(
out,
batch,
self.config.fape,
),
"lddt": lambda: lddt_loss(
logits=out["lddt_logits"],
all_atom_pred_pos=out["final_atom_positions"],
**{**batch, **self.config.lddt},
),
"masked_msa": lambda: masked_msa_loss(
logits=out["masked_msa_logits"],
**{**batch, **self.config.masked_msa},
),
"supervised_chi": lambda: supervised_chi_loss(
out["sm"]["angles"],
out["sm"]["unnormalized_angles"],
**{**batch, **self.config.supervised_chi},
),
"violation": lambda: violation_loss(
out["violation"],
**batch,
),
}
if(self.config.tm.enabled):
loss_fns["tm"] = lambda: tm_loss(
logits=out["tm_logits"],
**{**batch, **out, **self.config.tm},
)
cum_loss = 0.
losses = {}
for loss_name, loss_fn in loss_fns.items():
weight = self.config[loss_name].weight
loss = loss_fn()
if(torch.isnan(loss) or torch.isinf(loss)):
logging.warning(f"{loss_name} loss is NaN. Skipping...")
loss = loss.new_tensor(0., requires_grad=True)
cum_loss = cum_loss + weight * loss
losses[loss_name] = loss.detach().clone()
losses["unscaled_loss"] = cum_loss.detach().clone()
# Scale the loss by the square root of the minimum of the crop size and
# the (average) sequence length. See subsection 1.9.
seq_len = torch.mean(batch["seq_length"].float())
crop_len = batch["aatype"].shape[-1]
cum_loss = cum_loss * torch.sqrt(min(seq_len, crop_len))
if(not _return_breakdown):
return cum_loss
return cum_loss, losses
| 32.656059 | 82 | 0.630266 |
09021cb002427d97a2baf0916dea8a153dca19f5 | 31,534 | py | Python | utility.py | kant/RenderStackNode | 19876fc75a03edf36ae27837d193509907adbd4a | [
"Apache-2.0"
] | null | null | null | utility.py | kant/RenderStackNode | 19876fc75a03edf36ae27837d193509907adbd4a | [
"Apache-2.0"
] | null | null | null | utility.py | kant/RenderStackNode | 19876fc75a03edf36ae27837d193509907adbd4a | [
"Apache-2.0"
] | null | null | null | import os
import logging
import time
import re
import numpy as np
from itertools import groupby
from collections import deque
from functools import lru_cache, wraps
import bpy
from mathutils import Color, Vector
from .preferences import get_pref
# init logger
LOG_FORMAT = "%(asctime)s - RSN-%(levelname)s - %(message)s"
logging.basicConfig(format=LOG_FORMAT)
logger = logging.getLogger('mylogger')
# get the update time
def timefn(fn):
@wraps(fn)
def measure_time(*args, **kwargs):
t1 = time.time()
result = fn(*args, **kwargs)
t2 = time.time()
s = f'{(t2 - t1) * 1000: .4f} ms'
bpy.context.window_manager.rsn_tree_time = s
logger.info(f"RSN Tree: update took{s}\n")
return result
return measure_time
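# Hedged usage sketch (assumption, not part of the original add-on): decorating a
# tree-update function logs its runtime and mirrors it to the window-manager string shown
# in the UI. The decorated function below is hypothetical.
@timefn
def _timed_update_example(node_tree):
    # any expensive tree work would go here; the decorator handles the timing
    return len(node_tree.nodes)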
def source_attr(src_obj, scr_data_path):
def get_obj_and_attr(obj, data_path):
path = data_path.split('.')
if len(path) == 1:
return obj, path[0]
else:
back_obj = getattr(obj, path[0])
back_path = '.'.join(path[1:])
return get_obj_and_attr(back_obj, back_path)
return get_obj_and_attr(src_obj, scr_data_path)
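# Hedged usage sketch (assumed example): resolving a nested RNA data path returns the
# owning object plus the final attribute name, ready for getattr/setattr.
def _source_attr_example():
    scene = bpy.context.scene
    obj, attr = source_attr(scene, 'render.resolution_x')
    # obj is scene.render and attr == 'resolution_x'
    return getattr(obj, attr)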
def compare(obj: object, attr: str, val):
"""Use for compare and apply attribute since some properties change may cause depsgraph changes"""
try:
if getattr(obj, attr) != val:
setattr(obj, attr, val)
logger.debug(f'Attribute "{attr}" SET “{val}”')
except AttributeError as e:
logger.info(e)
class RSN_NodeTree:
"""To store context node tree for getting data in renderstack"""
def get_context_tree(self, return_name=False):
try:
name = bpy.context.space_data.edit_tree.name
return bpy.context.space_data.edit_tree.name if return_name else bpy.data.node_groups[name]
except:
return None
def set_wm_node_tree(self, node_tree_name):
bpy.context.window_manager.rsn_cur_tree_name = node_tree_name
def get_wm_node_tree(self, get_name=False):
name = bpy.context.window_manager.rsn_cur_tree_name
if get_name:
return name
else:
return bpy.data.node_groups[name]
def set_context_tree_as_wm_tree(self):
tree_name = self.get_context_tree(return_name=1)
if tree_name:
self.set_wm_node_tree(tree_name)
# class RSN_Gpaph:
# def __init__(self, node_tree, root_node_name):
# self.nt = node_tree
# self.root_node = self.get_node_from_name(root_node_name)
#
# def get_children_from_node(self, root_node, pass_mute=True) -> list:
# """Depth first search
# :parm root_node: a blender node
# nodes append from left to right, from top to bottom
# """
# node_list = []
#
# # @lru_cache(maxsize=None)
# def get_sub_node(node, pass_mute_node=True):
# """Recursion
# :parm node: a blender node
#
# """
# for i, input in enumerate(node.inputs):
# if input.is_linked:
# try:
# sub_node = input.links[0].from_node
# if sub_node.mute and pass_mute_node: continue
#
# get_sub_node(sub_node)
# # This error shows when the dragging the link off viewer node(Works well with knife tool)
# # this seems to be a blender error
# except IndexError:
# pass
# else:
# continue
# # Skip the reroute node
# if node.bl_idname != 'NodeReroute':
# if len(node_list) == 0 or (len(node_list) != 0 and node.name != node_list[-1]):
# node_list.append(node.name)
#
# get_sub_node(root_node, pass_mute)
class RSN_Nodes:
"""Tree method"""
def __init__(self, node_tree, root_node_name):
self.nt = node_tree
self.root_node = self.get_node_from_name(root_node_name)
def get_node_from_name(self, name):
return self.nt.nodes.get(name)
def get_root_node(self):
return self.root_node
def get_children_from_node(self, root_node, pass_mute=True):
"""Depth first search
:parm root_node: a blender node
"""
node_list = []
def append_node_to_list(node):
"""Skip the reroute node"""
if node.bl_idname != 'NodeReroute':
if len(node_list) == 0 or (len(node_list) != 0 and node.name != node_list[-1]):
node_list.append(node.name)
# @lru_cache(maxsize=None)
def get_sub_node(node, pass_mute_node=True):
"""Recursion
:parm node: a blender node
"""
for i, input in enumerate(node.inputs):
if input.is_linked:
try:
sub_node = input.links[0].from_node
if sub_node.mute and pass_mute_node:
continue
else:
get_sub_node(sub_node)
# This error shows when the dragging the link off viewer node(Works well with knife tool)
# this seems to be a blender error
except IndexError:
pass
else:
continue
# nodes append from left to right, from top to bottom
append_node_to_list(node)
get_sub_node(root_node, pass_mute)
return node_list
def get_sub_node_dict_from_node_list(self, node_list, parent_node_type, black_list=None):
"""Use Task node as separator to get sub nodes in this task
:parm node_list:
:parm parent_node_type: node.bl_idname: str
:parm black_list: list node.bl_idname that you want to skip
"""
node_list_dict = {}
if not black_list: black_list = ['RSNodeTaskListNode', 'RSNodeRenderListNode']
node_list[:] = [node for node in node_list if
self.nt.nodes[node].bl_idname not in black_list]
# separate nodes with the node type input
children_node_list = [list(g) for k, g in
groupby(node_list, lambda name: self.nt.nodes[name].bl_idname == parent_node_type) if
not k]
# get the node type input
parent_node_list = [node for node in node_list if self.nt.nodes[node].bl_idname == parent_node_type]
# make a dict {parent name:[children list]}
for i in range(len(parent_node_list)):
try:
node_list_dict[parent_node_list[i]] = children_node_list[i]
# release the node behind the parent
except IndexError:
pass
return node_list_dict
def get_children_from_var_node(self, var_node, active, pass_mute=True):
"""Depth first search for the Variants children
:parm var_node: a blender node
:parm active:the active input of the Variants node
"""
black_list = [] # list of nodes to remove from the origin node list
def append_node_to_list(node):
"""Skip the reroute node"""
if node.bl_idname != 'NodeReroute':
if len(black_list) == 0 or (len(black_list) != 0 and node.name != black_list[-1]):
if node.bl_idname != 'RSNodeVariantsNode': black_list.append(node.name)
# @lru_cache(maxsize=None)
def get_sub_node(node, pass_mute_node=True):
"""Recursion
:parm node: a blender node
"""
for i, input in enumerate(node.inputs):
if input.is_linked and True in (i != active, node.bl_idname != 'RSNodeVariantsNode'):
try:
sub_node = input.links[0].from_node
if sub_node.mute and pass_mute_node:
continue
else:
get_sub_node(sub_node)
# This error shows when the dragging the link off viewer node(Works well with knife tool)
# this seems to be a blender error
except IndexError:
pass
else:
continue
# nodes append from left to right, from top to bottom
append_node_to_list(node)
get_sub_node(var_node, pass_mute)
return black_list
def get_children_from_task(self, task_name, return_dict=False, type='RSNodeTaskNode'):
"""pack method for task node
:parm task_name: name of the task node
:parm return_dict: return dict instead of node list
{'task node name':[
children node name1,
children node name2]
}
:parm type: the bl_idname of the node (key for the dict)
"""
task = self.get_node_from_name(task_name)
try:
node_list = self.get_children_from_node(task)
# VariantsNodeProperty node in each task
# only one set VariantsNodeProperty node will be active
var_collect = {}
for node_name in node_list:
set_var_node = self.nt.nodes[node_name]
if set_var_node.bl_idname == 'RSNodeSetVariantsNode':
for item in set_var_node.node_collect:
if item.use:
var_collect[item.name] = item.active
break
for node_name, active in var_collect.items():
var_node = self.nt.nodes[node_name]
black_list = self.get_children_from_var_node(var_node, active)
node_list = [i for i in node_list if i not in black_list]
# return clean node list
if not return_dict:
return node_list
else:
return self.get_sub_node_dict_from_node_list(node_list=node_list,
parent_node_type=type)
except AttributeError:
pass
def get_children_from_render_list(self, return_dict=False, type='RSNodeTaskNode'):
"""pack method for render list node(get all task)
"""
render_list = self.get_node_from_name(self.root_node.name)
node_list = self.get_children_from_node(render_list)
if not return_dict:
return node_list
else:
return self.get_sub_node_dict_from_node_list(node_list=node_list,
parent_node_type=type)
def graph(self):
        node_list = self.get_children_from_node(self.root_node)
        return node_list
def get_task_data(self, task_name, task_dict):
"""transfer nodes to data
:parm task_name: name of the task node
:parm task_dict: parse dict
{'task node name':[
children node name1,
children node name2]
}
"""
task_data = {}
for node_name in task_dict[task_name]:
node = self.nt.nodes[node_name]
node.debug()
# task node
task_node = self.nt.nodes[task_name]
task_data['name'] = task_name
task_data['label'] = task_node.label
# old method/nodes
#####################
# Object select Nodes
if node.bl_idname == 'RSNodePropertyInputNode':
if 'property' not in task_data:
task_data['property'] = {}
task_data['property'].update(node.get_data())
elif node.bl_idname == 'RSNodeObjectDataNode':
if 'object_data' not in task_data:
task_data['object_data'] = {}
task_data['object_data'].update(node.get_data())
elif node.bl_idname == 'RSNodeObjectModifierNode':
if 'object_modifier' not in task_data:
task_data['object_modifier'] = {}
task_data['object_modifier'].update(node.get_data())
elif node.bl_idname in 'RSNodeObjectDisplayNode':
if 'object_display' not in task_data:
task_data['object_display'] = {}
task_data['object_display'].update(node.get_data())
elif node.bl_idname == 'RSNodeCollectionDisplayNode':
if 'collection_display' not in task_data:
task_data['collection_display'] = {}
task_data['collection_display'].update(node.get_data())
elif node.bl_idname == 'RSNodeObjectMaterialNode':
if 'object_material' not in task_data:
task_data['object_material'] = {}
task_data['object_material'].update(node.get_data())
elif node.bl_idname == 'RSNodeObjectPSRNode':
if 'object_psr' not in task_data:
task_data['object_psr'] = {}
task_data['object_psr'].update(node.get_data())
elif node.bl_idname == 'RSNodeViewLayerPassesNode':
if 'view_layer_passes' not in task_data:
task_data['view_layer_passes'] = {}
task_data['view_layer_passes'].update(node.get_data())
elif node.bl_idname == 'RSNodeSmtpEmailNode':
if 'email' not in task_data:
task_data['email'] = {}
task_data['email'].update(node.get_data())
elif node.bl_idname == 'RSNodeScriptsNode':
if node.type == 'SINGLE':
if 'scripts' not in task_data:
task_data['scripts'] = {}
task_data['scripts'].update(node.get_data())
else:
if 'scripts_file' not in task_data:
task_data['scripts_file'] = {}
task_data['scripts_file'].update(node.get_data())
# Single node
else:
try:
task_data.update(node.get_data())
except TypeError:
pass
return task_data
class RenderQueue():
def __init__(self, nodetree, render_list_node):
"""init a rsn queue
:parm nodetree: a blender node tree(rsn node tree)
:parm render_list_node: render_list_node
"""
self.nt = nodetree
self.root_node = render_list_node
self.task_queue = deque()
self.task_list = []
self.init_queue()
def init_queue(self):
for item in self.root_node.task_list:
if item.render:
self.task_queue.append(item.name)
self.task_list.append(item.name)
# for processing visualization
bpy.context.window_manager.rsn_cur_task_list = ','.join(self.task_list)
def is_empty(self):
return len(self.task_queue) == 0
def get_frame_range(self):
self.force_update()
        # if no frame range node is connected, treat it as rendering a single frame
frame_start = frame_end = bpy.context.scene.frame_start
frame_step = bpy.context.scene.frame_step
# search frame range node
node_list = bpy.context.window_manager.rsn_node_list.split(',')
frame_range_nodes = [self.nt.nodes[name] for name in node_list if
self.nt.nodes[name].bl_idname == 'RSNodeFrameRangeInputNode']
# get the last frame range node for current task
if len(frame_range_nodes) != 0:
node = frame_range_nodes[-1]
frame_start = node.frame_start
frame_end = node.frame_end
frame_step = node.frame_step
return frame_start, frame_end, frame_step
def force_update(self):
if not self.is_empty():
self.nt.nodes[self.task_queue[0]].is_active_task = True
def pop(self):
if not self.is_empty():
return self.task_queue.popleft()
def clear_queue(self):
self.task_queue.clear()
bpy.context.window_manager.rsn_cur_task_list = ''
class RSN_OLD_TaskUpdater():
def __init__(self, node_tree, task_data):
self.task_data = task_data
self.nt = node_tree
def warning_node_color(self, node_name, msg=''):
"""
:parm e: error message
use try to catch error because user may use task info node to input settings
"""
try:
node = self.nt.nodes[node_name]
node.set_warning(msg=msg)
except Exception as e:
print(e)
def update_all(self):
if not self.task_data: return None
pref = get_pref()
self.update_camera()
self.update_color_management()
self.update_res()
self.update_render_engine()
self.update_property()
self.update_collection_display()
self.update_object_display()
self.update_object_psr()
self.update_object_data()
self.update_object_material()
self.update_object_modifier()
self.update_frame_range()
self.updata_view_layer()
self.update_image_format()
self.update_slots()
self.update_world()
self.ssm_light_studio()
if pref.node_task.update_scripts:
self.updata_scripts()
if pref.node_task.update_path:
self.update_path()
if pref.node_task.update_view_layer_passes:
self.update_view_layer_passes()
self.send_email()
def update_color_management(self):
"""may change in 2.93 version"""
if 'ev' in self.task_data:
vs = bpy.context.scene.view_settings
compare(vs, 'exposure', self.task_data['ev'])
compare(vs, 'gamma', self.task_data['gamma'])
try:
compare(vs, 'view_transform', self.task_data['view_transform'])
compare(vs, 'look', self.task_data['look'])
except: # ocio change in 2.93
pass
def update_path(self):
dir = self.make_path()
postfix = self.get_postfix()
rn = bpy.context.scene.render
compare(rn, 'use_file_extension', 1)
compare(rn, 'filepath', os.path.join(dir, postfix))
def make_path(self):
"""only save files will work"""
task = self.task_data
if 'path' in task:
if task['path'] == '//':
directory_path = bpy.path.abspath(task['path'])
else:
directory_path = os.path.dirname(task['path'])
try:
if not os.path.exists(directory_path):
os.makedirs(directory_path)
return directory_path
except Exception as e:
                logger.warning(f'File Path: no such path ({e})')
else:
return '//'
def get_postfix(self):
"""path expression"""
scn = bpy.context.scene
cam = scn.camera
blend_name = ''
postfix = ''
if 'path' in self.task_data:
postfix = self.task_data["path_expression"]
# replace camera name
if cam:
postfix = postfix.replace('$camera', cam.name)
else:
postfix = postfix
# replace engine
postfix = postfix.replace('$engine', bpy.context.scene.render.engine)
# replace res
postfix = postfix.replace('$res', f"{scn.render.resolution_x}x{scn.render.resolution_y}")
# replace label
postfix = postfix.replace('$label', self.task_data["label"])
# replace view_layer
postfix = postfix.replace('$vl', bpy.context.view_layer.name)
# version_
postfix = postfix.replace('$V', self.task_data["version"])
# frame completion
STYLE = re.findall(r'([$]F\d)', postfix)
if len(STYLE) > 0:
c_frame = bpy.context.scene.frame_current
for i, string in enumerate(STYLE):
format = f'0{STYLE[i][-1:]}d'
postfix = postfix.replace(STYLE[i], f'{c_frame:{format}}')
# time format
TIME = re.findall(r'([$]T{.*?})', postfix)
if len(TIME) > 0:
for i, string in enumerate(TIME):
format = time.strftime(TIME[i][3:-1], time.localtime())
postfix = postfix.replace(TIME[i], format)
# replace filename
try:
blend_name = bpy.path.basename(bpy.data.filepath)[:-6]
postfix = postfix.replace('$blend', blend_name)
except Exception:
return 'untitled'
return postfix
def update_view_layer_passes(self):
"""each view layer will get a file output node
but I recommend to save an Multilayer exr file instead of use this node
"""
if 'view_layer_passes' in self.task_data:
for node_name, dict in self.task_data['view_layer_passes'].items():
try:
bpy.ops.rsn.creat_compositor_node(
view_layer=self.task_data['view_layer_passes'][node_name]['view_layer'],
use_passes=self.task_data['view_layer_passes'][node_name]['use_passes'])
except Exception as e:
logger.warning(f'View Layer Passes {node_name} error', exc_info=e)
else:
bpy.ops.rsn.creat_compositor_node(use_passes=0, view_layer=bpy.context.window.view_layer.name)
def update_property(self):
if 'property' in self.task_data:
for node_name, dict in self.task_data['property'].items():
try:
obj = eval(dict['full_data_path'])
value = dict['value']
if obj != value:
exec(f"{dict['full_data_path']}={value}")
except Exception as e:
self.warning_node_color(node_name, f'Full data path error!\n{e}')
def update_object_display(self):
if 'object_display' in self.task_data:
for node_name, dict in self.task_data['object_display'].items():
ob = eval(dict['object'])
compare(ob, 'hide_viewport', dict['hide_viewport'])
compare(ob, 'hide_render', dict['hide_render'])
def update_collection_display(self):
if 'collection_display' in self.task_data:
for node_name, dict in self.task_data['collection_display'].items():
ob = eval(dict['collection'])
compare(ob, 'hide_viewport', dict['hide_viewport'])
compare(ob, 'hide_render', dict['hide_render'])
def update_object_psr(self):
if 'object_psr' in self.task_data:
for node_name, dict in self.task_data['object_psr'].items():
ob = eval(dict['object'])
if 'location' in dict:
compare(ob, 'location', dict['location'])
if 'scale' in dict:
compare(ob, 'scale', dict['scale'])
if 'rotation' in dict:
compare(ob, 'rotation_euler', dict['rotation'])
def update_object_material(self):
if 'object_material' in self.task_data:
for node_name, dict in self.task_data['object_material'].items():
ob = eval(dict['object'])
try:
if ob.material_slots[dict['slot_index']].material.name != dict['new_material']:
ob.material_slots[dict['slot_index']].material = bpy.data.materials[dict['new_material']]
except Exception as e:
pass
def update_object_data(self):
if 'object_data' in self.task_data:
for node_name, dict in self.task_data['object_data'].items():
ob = eval(dict['object'])
value = dict['value']
obj, attr = source_attr(ob.data, dict['data_path'])
compare(obj, attr, value)
def update_object_modifier(self):
if 'object_modifier' in self.task_data:
for node_name, dict in self.task_data['object_modifier'].items():
ob = eval(dict['object'])
value = dict['value']
match = re.match(r"modifiers[[](.*?)[]]", dict['data_path'])
name = match.group(1)
if name:
data_path = dict['data_path'].split('.')[-1]
modifier = ob.modifiers[name[1:-1]]
compare(modifier, data_path, value)
def update_slots(self):
if 'render_slot' in self.task_data:
compare(bpy.data.images['Render Result'].render_slots, 'active_index', self.task_data['render_slot'])
def update_world(self):
if 'world' in self.task_data:
if bpy.context.scene.world.name != self.task_data['world']:
bpy.context.scene.world = bpy.data.worlds[self.task_data['world']]
def ssm_light_studio(self):
if 'ssm_light_studio' in self.task_data:
index = self.task_data['ssm_light_studio']
try:
compare(bpy.context.scene.ssm, 'light_studio_index', index)
except Exception as e:
logger.warning(f'SSM LightStudio node error', exc_info=e)
def send_email(self):
if 'email' in self.task_data:
for node_name, email_dict in self.task_data['email'].items():
try:
bpy.ops.rsn.send_email(subject=email_dict['subject'],
content=email_dict['content'],
sender_name=email_dict['sender_name'],
email=email_dict['email'])
except Exception as e:
self.warning_node_color(node_name, str(e))
def updata_view_layer(self):
if 'view_layer' in self.task_data and bpy.context.window.view_layer.name != self.task_data['view_layer']:
bpy.context.window.view_layer = bpy.context.scene.view_layers[self.task_data['view_layer']]
def updata_scripts(self):
if 'scripts' in self.task_data:
for node_name, value in self.task_data['scripts'].items():
try:
exec(value)
except Exception as e:
self.warning_node_color(node_name, str(e))
if 'scripts_file' in self.task_data:
for node_name, file_name in self.task_data['scripts_file'].items():
try:
c = bpy.data.texts[file_name].as_string()
exec(c)
except Exception as e:
self.warning_node_color(node_name, str(e))
def update_image_format(self):
if 'image_settings' in self.task_data:
rn = bpy.context.scene.render
image_settings = self.task_data['image_settings']
compare(rn.image_settings, 'file_format', image_settings['file_format'])
compare(rn.image_settings, 'color_mode', image_settings['color_mode'])
compare(rn.image_settings, 'color_depth', image_settings['color_depth'])
compare(rn.image_settings, 'use_preview', image_settings['use_preview'])
compare(rn.image_settings, 'compression', image_settings['compression'])
compare(rn.image_settings, 'quality', image_settings['quality'])
compare(rn, 'film_transparent', image_settings['transparent'])
def update_frame_range(self):
if "frame_start" in self.task_data:
scn = bpy.context.scene
compare(scn, 'frame_start', self.task_data['frame_start'])
compare(scn, 'frame_end', self.task_data['frame_end'])
compare(scn, 'frame_step', self.task_data['frame_step'])
def update_render_engine(self):
engines = ['BLENDER_EEVEE', 'BLENDER_WORKBENCH'] + [engine.bl_idname for engine in
bpy.types.RenderEngine.__subclasses__()]
has_engine = None
# engine settings
if 'engine' in self.task_data:
if self.task_data['engine'] in engines:
compare(bpy.context.scene.render, 'engine', self.task_data['engine'])
has_engine = True
# samples
if 'samples' in self.task_data:
if self.task_data['engine'] == "BLENDER_EEVEE":
compare(bpy.context.scene.eevee, 'taa_render_samples', self.task_data['samples'])
elif self.task_data['engine'] == "CYCLES":
compare(bpy.context.scene.cycles, 'samples', self.task_data['samples'])
# CYCLES
if 'cycles_light_path' in self.task_data:
for key, value in self.task_data['cycles_light_path'].items():
compare(bpy.context.scene.cycles, key, value)
if not has_engine: return None
# luxcore
if 'luxcore_half' in self.task_data and 'BlendLuxCore' in bpy.context.preferences.addons:
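            # Choose the halt condition (sample count vs. render time) that matches the task settings.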
if not bpy.context.scene.luxcore.halt.enable:
bpy.context.scene.luxcore.halt.enable = True
if self.task_data['luxcore_half']['use_samples'] is False and self.task_data['luxcore_half'][
'use_time'] is False:
bpy.context.scene.luxcore.halt.use_samples = True
elif self.task_data['luxcore_half']['use_samples'] is True and self.task_data['luxcore_half'][
'use_time'] is False:
if not bpy.context.scene.luxcore.halt.use_samples:
bpy.context.scene.luxcore.halt.use_samples = True
if bpy.context.scene.luxcore.halt.use_time:
bpy.context.scene.luxcore.halt.use_time = False
compare(bpy.context.scene.luxcore.halt, 'samples', self.task_data['luxcore_half']['samples'])
elif self.task_data['luxcore_half']['use_samples'] is False and self.task_data['luxcore_half'][
'use_time'] is True:
if bpy.context.scene.luxcore.halt.use_samples:
bpy.context.scene.luxcore.halt.use_samples = False
if not bpy.context.scene.luxcore.halt.use_time:
bpy.context.scene.luxcore.halt.use_time = True
compare(bpy.context.scene.luxcore.halt, 'time', self.task_data['luxcore_half']['time'])
# octane
elif 'octane' in self.task_data and 'octane' in bpy.context.preferences.addons:
for key, value in self.task_data['octane'].items():
compare(bpy.context.scene.octane, key, value)
def update_res(self):
if 'res_x' in self.task_data:
rn = bpy.context.scene.render
compare(rn, 'resolution_x', self.task_data['res_x'])
compare(rn, 'resolution_y', self.task_data['res_y'])
compare(rn, 'resolution_percentage', self.task_data['res_scale'])
def update_camera(self):
if 'camera' in self.task_data and self.task_data['camera']:
cam = eval(self.task_data['camera'])
if cam: compare(bpy.context.scene, 'camera', cam)
| 38.787208 | 115 | 0.568371 |
1f025ca0866e4c65c06369a8bbc75e717140fd0b | 1,236 | py | Python | 2015/06/fc_2015_06_08.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
] | null | null | null | 2015/06/fc_2015_06_08.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
] | 1 | 2015-04-27T01:43:45.000Z | 2015-04-27T01:43:45.000Z | 2015/06/fc_2015_06_08.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# imports go here
import matplotlib.pyplot as plt
from sklearn import datasets, svm, metrics
#
# Free Coding session for 2015-06-08
# Written by Matt Warren
#
digits = datasets.load_digits()
images = list(zip(digits.images, digits.target))
for i, (image, label) in enumerate(images[:4]):
plt.subplot(2, 4, i + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
n_samples = len(digits.images)
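# Flatten each 8x8 digit image into a 64-element feature vector for the classifier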
data = digits.images.reshape((n_samples, -1))
classifier = svm.SVC(gamma=0.001)
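# Train on the first half of the digits and evaluate on the held-out second half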
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("report %s\n%s\n" % (classifier, metrics.classification_report(expected, predicted)))
print("confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for i, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, i + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| 30.9 | 91 | 0.713592 |
8d3821361d8f670feadf94d5e15bb148a849f793 | 10 | py | Python | HonkaiPython/test5.py | ReZeroE/Honkai-Impact-AutoPlay-Driver | 096b23cce401dd32dc8ada5d06464e3d902d0b59 | [
"MIT"
] | null | null | null | HonkaiPython/test5.py | ReZeroE/Honkai-Impact-AutoPlay-Driver | 096b23cce401dd32dc8ada5d06464e3d902d0b59 | [
"MIT"
] | null | null | null | HonkaiPython/test5.py | ReZeroE/Honkai-Impact-AutoPlay-Driver | 096b23cce401dd32dc8ada5d06464e3d902d0b59 | [
"MIT"
] | null | null | null | import wmi | 10 | 10 | 0.9 |
b0bef04c62f0481be11ea23f10033dd38ca931cd | 20,252 | py | Python | smart_home/Camera.py | shanbs/netatmo-api-python | bb714189868bb5235ad4d1241dd4818fe633fe70 | [
"MIT"
] | null | null | null | smart_home/Camera.py | shanbs/netatmo-api-python | bb714189868bb5235ad4d1241dd4818fe633fe70 | [
"MIT"
] | null | null | null | smart_home/Camera.py | shanbs/netatmo-api-python | bb714189868bb5235ad4d1241dd4818fe633fe70 | [
"MIT"
] | null | null | null | """
coding=utf-8
"""
import imghdr
import time
from urllib.error import URLError
from . import NoDevice, postRequest, _BASE_URL
_GETHOMEDATA_REQ = _BASE_URL + "api/gethomedata"
_GETCAMERAPICTURE_REQ = _BASE_URL + "api/getcamerapicture"
_GETEVENTSUNTIL_REQ = _BASE_URL + "api/geteventsuntil"
class CameraData:
"""
List the Netatmo cameras informations
(Homes, cameras, modules, events, persons)
Args:
authData (ClientAuth):
Authentication information with a working access Token
"""
def __init__(self, authData, size=15):
self.getAuthToken = authData.accessToken
postParams = {"access_token": self.getAuthToken, "size": size}
resp = postRequest(_GETHOMEDATA_REQ, postParams)
if "body" not in resp:
raise URLError("No data returned by Netatmo server")
self.rawData = resp["body"]
self.homes = {d["id"]: d for d in self.rawData["homes"]}
if not self.homes:
raise NoDevice("No camera available")
self.persons = dict()
self.events = dict()
self.outdoor_events = dict()
self.cameras = dict()
self.modules = dict()
self.lastEvent = dict()
self.outdoor_lastEvent = dict()
self.types = dict()
self.default_home = None
self.default_camera = None
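        # Index cameras, modules, persons and events per home, keeping the newest event per camera.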
for i in range(len(self.rawData["homes"])):
nameHome = self.rawData["homes"][i]["name"]
if nameHome not in self.cameras:
self.cameras[nameHome] = dict()
if nameHome not in self.types:
self.types[nameHome] = dict()
for p in self.rawData["homes"][i]["persons"]:
self.persons[p["id"]] = p
if "events" in self.rawData["homes"][i]:
self.default_home = self.rawData["homes"][i]["name"]
for e in self.rawData["homes"][i]["events"]:
if e["type"] == "outdoor":
if e["camera_id"] not in self.outdoor_events:
self.outdoor_events[e["camera_id"]] = dict()
self.outdoor_events[e["camera_id"]][e["time"]] = e
elif e["type"] != "outdoor":
if e["camera_id"] not in self.events:
self.events[e["camera_id"]] = dict()
self.events[e["camera_id"]][e["time"]] = e
for c in self.rawData["homes"][i]["cameras"]:
self.cameras[nameHome][c["id"]] = c
if c["type"] == "NACamera" and "modules" in c:
for m in c["modules"]:
self.modules[m["id"]] = m
self.modules[m["id"]]["cam_id"] = c["id"]
for t in self.rawData["homes"][i]["cameras"]:
self.types[nameHome][t["type"]] = t
for camera in self.events:
self.lastEvent[camera] = self.events[camera][
sorted(self.events[camera])[-1]
]
for camera in self.outdoor_events:
self.outdoor_lastEvent[camera] = self.outdoor_events[camera][
sorted(self.outdoor_events[camera])[-1]
]
if self.modules != {}:
self.default_module = list(self.modules.values())[0]["name"]
else:
self.default_module = None
if self.default_home is not None and len(self.cameras) > 0:
self.default_camera = list(self.cameras[self.default_home].values())[0]
def homeById(self, hid):
return None if hid not in self.homes else self.homes[hid]
def homeByName(self, home=None):
if not home:
return self.homeByName(self.default_home)
for key, value in self.homes.items():
if value["name"] == home:
return self.homes[key]
def cameraById(self, cid):
for home, cam in self.cameras.items():
if cid in self.cameras[home]:
return self.cameras[home][cid]
return None
def cameraByName(self, camera=None, home=None):
if not camera and not home:
return self.default_camera
elif home and camera:
if home not in self.cameras:
return None
for cam_id in self.cameras[home]:
if self.cameras[home][cam_id]["name"] == camera:
return self.cameras[home][cam_id]
elif not home and camera:
for home, cam_ids in self.cameras.items():
for cam_id in cam_ids:
if self.cameras[home][cam_id]["name"] == camera:
return self.cameras[home][cam_id]
else:
return list(self.cameras[home].values())[0]
return None
def moduleById(self, mid):
return None if mid not in self.modules else self.modules[mid]
def moduleByName(self, module=None, camera=None, home=None):
if not module:
if self.default_module:
return self.moduleByName(self.default_module)
else:
return None
cam = None
if camera or home:
cam = self.cameraByName(camera, home)
if not cam:
return None
for key, value in self.modules.items():
if value["name"] == module:
if cam and value["cam_id"] != cam["id"]:
return None
return self.modules[key]
return None
def cameraType(self, camera=None, home=None, cid=None):
"""
Return the type of a given camera.
"""
cameratype = None
if cid:
camera_data = self.cameraById(cid)
else:
camera_data = self.cameraByName(camera=camera, home=home)
if camera_data:
cameratype = camera_data["type"]
return cameratype
def cameraUrls(self, camera=None, home=None, cid=None):
"""
Return the vpn_url and the local_url (if available) of a given camera
in order to access to its live feed
"""
local_url = None
vpn_url = None
if cid:
camera_data = self.cameraById(cid)
else:
camera_data = self.cameraByName(camera=camera, home=home)
if camera_data:
vpn_url = camera_data["vpn_url"]
if camera_data["is_local"]:
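                # Ask the camera (via its VPN URL) for its LAN address, then
                # confirm that the LAN address actually answers before using it.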
try:
resp = postRequest(
"{0}/command/ping".format(camera_data["vpn_url"]), dict()
)
temp_local_url = resp["local_url"]
except URLError:
return None, None
try:
resp = postRequest(
"{0}/command/ping".format(temp_local_url), dict()
)
if temp_local_url == resp["local_url"]:
local_url = temp_local_url
except URLError:
pass
return vpn_url, local_url
def personsAtHome(self, home=None):
"""
Return the list of known persons who are currently at home
"""
if not home:
home = self.default_home
home_data = self.homeByName(home)
atHome = []
for p in home_data["persons"]:
# Only check known persons
if "pseudo" in p:
if not p["out_of_sight"]:
atHome.append(p["pseudo"])
return atHome
def getCameraPicture(self, image_id, key):
"""
Download a specific image (of an event or user face) from the camera
"""
postParams = {
"access_token": self.getAuthToken,
"image_id": image_id,
"key": key,
}
resp = postRequest(_GETCAMERAPICTURE_REQ, postParams)
image_type = imghdr.what("NONE.FILE", resp)
return resp, image_type
def getProfileImage(self, name):
"""
Retrieve the face of a given person
"""
for p in self.persons:
if "pseudo" in self.persons[p]:
if name == self.persons[p]["pseudo"]:
image_id = self.persons[p]["face"]["id"]
key = self.persons[p]["face"]["key"]
return self.getCameraPicture(image_id, key)
return None, None
def updateEvent(self, event=None, home=None, cameratype=None):
"""
Update the list of event with the latest ones
"""
if not home:
home = self.default_home
if cameratype == "NACamera":
# for the Welcome camera
if not event:
# If not event is provided we need to retrieve the oldest of
# the last event seen by each camera
listEvent = dict()
for cam_id in self.lastEvent:
listEvent[self.lastEvent[cam_id]["time"]] = self.lastEvent[cam_id]
event = listEvent[sorted(listEvent)[0]]
if cameratype == "NOC":
# for the Presence camera
if not event:
# If not event is provided we need to retrieve the oldest of
# the last event seen by each camera
listEvent = dict()
for cam_id in self.outdoor_lastEvent:
listEvent[
self.outdoor_lastEvent[cam_id]["time"]
] = self.outdoor_lastEvent[cam_id]
event = listEvent[sorted(listEvent)[0]]
home_data = self.homeByName(home)
postParams = {
"access_token": self.getAuthToken,
"home_id": home_data["id"],
"event_id": event["id"],
}
resp = postRequest(_GETEVENTSUNTIL_REQ, postParams)
eventList = resp["body"]["events_list"]
for e in eventList:
if e["type"] == "outdoor":
self.outdoor_events[e["camera_id"]][e["time"]] = e
elif e["type"] != "outdoor":
self.events[e["camera_id"]][e["time"]] = e
for camera in self.events:
self.lastEvent[camera] = self.events[camera][
sorted(self.events[camera])[-1]
]
for camera in self.outdoor_events:
self.outdoor_lastEvent[camera] = self.outdoor_events[camera][
sorted(self.outdoor_events[camera])[-1]
]
def personSeenByCamera(self, name, home=None, camera=None, exclude=0):
"""
Return True if a specific person has been seen by a camera
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)["id"]
except TypeError:
print("personSeenByCamera: Camera name or home is unknown")
return False
        # Check whether the given person appears in recent events
if exclude:
limit = time.time() - exclude
array_time_event = sorted(self.events[cam_id])
array_time_event.reverse()
for time_ev in array_time_event:
if time_ev < limit:
return False
elif self.events[cam_id][time_ev]["type"] == "person":
person_id = self.events[cam_id][time_ev]["person_id"]
if "pseudo" in self.persons[person_id]:
if self.persons[person_id]["pseudo"] == name:
return True
elif self.lastEvent[cam_id]["type"] == "person":
person_id = self.lastEvent[cam_id]["person_id"]
if "pseudo" in self.persons[person_id]:
if self.persons[person_id]["pseudo"] == name:
return True
return False
def _knownPersons(self):
known_persons = dict()
for p_id, p in self.persons.items():
if "pseudo" in p:
known_persons[p_id] = p
return known_persons
def knownPersonsNames(self):
names = []
for p_id, p in self._knownPersons().items():
names.append(p["pseudo"])
return names
def someoneKnownSeen(self, home=None, camera=None, exclude=0):
"""
Return True if someone known has been seen
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)["id"]
except TypeError:
print("someoneKnownSeen: Camera name or home is unknown")
return False
if exclude:
limit = time.time() - exclude
array_time_event = sorted(self.events[cam_id])
array_time_event.reverse()
for time_ev in array_time_event:
if time_ev < limit:
return False
elif self.events[cam_id][time_ev]["type"] == "person":
if (
self.events[cam_id][time_ev]["person_id"]
in self._knownPersons()
):
return True
        # Check whether someone known has been seen in the last event
elif self.lastEvent[cam_id]["type"] == "person":
if self.lastEvent[cam_id]["person_id"] in self._knownPersons():
return True
return False
def someoneUnknownSeen(self, home=None, camera=None, exclude=0):
"""
Return True if someone unknown has been seen
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)["id"]
except TypeError:
print("someoneUnknownSeen: Camera name or home is unknown")
return False
if exclude:
limit = time.time() - exclude
array_time_event = sorted(self.events[cam_id])
array_time_event.reverse()
for time_ev in array_time_event:
if time_ev < limit:
return False
elif self.events[cam_id][time_ev]["type"] == "person":
if (
self.events[cam_id][time_ev]["person_id"]
not in self._knownPersons()
):
return True
        # Check whether someone unknown has been seen in the last event
elif self.lastEvent[cam_id]["type"] == "person":
if self.lastEvent[cam_id]["person_id"] not in self._knownPersons():
return True
return False
def motionDetected(self, home=None, camera=None, exclude=0):
"""
Return True if movement has been detected
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)["id"]
except TypeError:
print("motionDetected: Camera name or home is unknown")
return False
if exclude:
limit = time.time() - exclude
array_time_event = sorted(self.events[cam_id])
array_time_event.reverse()
for time_ev in array_time_event:
if time_ev < limit:
return False
elif self.events[cam_id][time_ev]["type"] == "movement":
return True
elif self.lastEvent[cam_id]["type"] == "movement":
return True
return False
def outdoormotionDetected(self, home=None, camera=None, offset=0):
"""
Return True if outdoor movement has been detected
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)["id"]
except TypeError:
print("outdoormotionDetected: Camera name or home is unknown")
return False
if cam_id in self.lastEvent:
if self.lastEvent[cam_id]["type"] == "movement":
if self.lastEvent[cam_id][
"video_status"
] == "recording" and self.lastEvent[cam_id]["time"] + offset > int(
time.time()
):
return True
return False
def humanDetected(self, home=None, camera=None, offset=0):
"""
Return True if a human has been detected
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)["id"]
except TypeError:
            print("humanDetected: Camera name or home is unknown")
return False
if self.outdoor_lastEvent[cam_id]["video_status"] == "recording":
for e in self.outdoor_lastEvent[cam_id]["event_list"]:
if e["type"] == "human" and e["time"] + offset > int(time.time()):
return True
return False
def animalDetected(self, home=None, camera=None, offset=0):
"""
Return True if an animal has been detected
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)["id"]
except TypeError:
print("animalDetected: Camera name or home is unknown")
return False
if self.outdoor_lastEvent[cam_id]["video_status"] == "recording":
for e in self.outdoor_lastEvent[cam_id]["event_list"]:
if e["type"] == "animal" and e["time"] + offset > int(time.time()):
return True
return False
def carDetected(self, home=None, camera=None, offset=0):
"""
Return True if a car has been detected
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)["id"]
except TypeError:
print("carDetected: Camera name or home is unknown")
return False
if self.outdoor_lastEvent[cam_id]["video_status"] == "recording":
for e in self.outdoor_lastEvent[cam_id]["event_list"]:
if e["type"] == "vehicle" and e["time"] + offset > int(time.time()):
return True
return False
def moduleMotionDetected(self, module=None, home=None, camera=None, exclude=0):
"""
Return True if movement has been detected
"""
try:
mod = self.moduleByName(module, camera=camera, home=home)
mod_id = mod["id"]
cam_id = mod["cam_id"]
except TypeError:
            print(
                "moduleMotionDetected: Module name or "
                "Camera name or home is unknown"
            )
return False
if exclude:
limit = time.time() - exclude
array_time_event = sorted(self.events[cam_id])
array_time_event.reverse()
for time_ev in array_time_event:
if time_ev < limit:
return False
elif (
self.events[cam_id][time_ev]["type"] == "tag_big_move"
or self.events[cam_id][time_ev]["type"] == "tag_small_move"
) and self.events[cam_id][time_ev]["module_id"] == mod_id:
return True
elif (
self.lastEvent[cam_id]["type"] == "tag_big_move"
or self.lastEvent[cam_id]["type"] == "tag_small_move"
) and self.lastEvent[cam_id]["module_id"] == mod_id:
return True
return False
def moduleOpened(self, module=None, home=None, camera=None, exclude=0):
"""
Return True if module status is open
"""
try:
mod = self.moduleByName(module, camera=camera, home=home)
mod_id = mod["id"]
cam_id = mod["cam_id"]
except TypeError:
print("moduleOpened: Camera name, or home, or module is unknown")
return False
if exclude:
limit = time.time() - exclude
array_time_event = sorted(self.events[cam_id])
array_time_event.reverse()
for time_ev in array_time_event:
if time_ev < limit:
return False
elif (
self.events[cam_id][time_ev]["type"] == "tag_open"
and self.events[cam_id][time_ev]["module_id"] == mod_id
):
return True
elif (
self.lastEvent[cam_id]["type"] == "tag_open"
and self.lastEvent[cam_id]["module_id"] == mod_id
):
return True
return False
| 38.428843 | 87 | 0.531947 |
fcd798960b94dd8ff2cf47e8889b7b765b9ffc57 | 24,477 | py | Python | zaza/openstack/utilities/generic.py | freyes/zaza-openstack-tests | c9834315f996966aaedd95d712a991df7a449eb8 | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2019-08-09T02:39:12.000Z | 2021-05-18T14:19:51.000Z | zaza/openstack/utilities/generic.py | freyes/zaza-openstack-tests | c9834315f996966aaedd95d712a991df7a449eb8 | [
"ECL-2.0",
"Apache-2.0"
] | 350 | 2019-05-13T10:28:33.000Z | 2022-03-30T13:35:16.000Z | zaza/openstack/utilities/generic.py | freyes/zaza-openstack-tests | c9834315f996966aaedd95d712a991df7a449eb8 | [
"ECL-2.0",
"Apache-2.0"
] | 72 | 2019-04-18T06:05:01.000Z | 2022-03-29T05:41:40.000Z | # Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of functions that did not fit anywhere else."""
import asyncio
import logging
import os
import socket
import subprocess
import telnetlib
import tempfile
import yaml
from zaza import model
from zaza.openstack.utilities import exceptions as zaza_exceptions
from zaza.openstack.utilities.os_versions import UBUNTU_OPENSTACK_RELEASE
from zaza.utilities import juju as juju_utils
def assertActionRanOK(action):
"""Assert that the remote action ran successfully.
Example usage::
self.assertActionRanOK(model.run_action(
unit,
'pause',
model_name=self.model_name))
self.assertActionRanOK(model.run_action_on_leader(
unit,
'pause',
model_name=self.model_name))
:param action: Action object to check.
:type action: juju.action.Action
:raises: AssertionError if the assertion fails.
"""
if action.status != 'completed':
msg = ("Action '{name}' exited with status '{status}': "
"'{message}'").format(**action.data)
raise AssertionError(msg)
def assertRemoteRunOK(run_output):
"""Use with zaza.model.run_on_unit.
Example usage::
self.assertRemoteRunOK(zaza.model.run_on_unit(
unit,
'ls /tmp/'))
:param action: Dict returned from remote run.
:type action: dict
:raises: AssertionError if the assertion fails.
"""
if int(run_output['Code']) != 0:
raise AssertionError("Command failed: {}".format(run_output))
def dict_to_yaml(dict_data):
"""Return YAML from dictionary.
:param dict_data: Dictionary data
:type dict_data: dict
:returns: YAML dump
:rtype: string
"""
return yaml.dump(dict_data, default_flow_style=False)
def get_network_config(net_topology, ignore_env_vars=False,
net_topology_file="network.yaml"):
"""Get network info from environment.
Get network info from network.yaml, override the values if specific
environment variables are set for the undercloud.
This function may be used when running network configuration from CLI to
pass in network configuration settings from a YAML file.
:param net_topology: Network topology name from network.yaml
:type net_topology: string
    :param ignore_env_vars: Ignore environment variables or not
:type ignore_env_vars: boolean
:returns: Dictionary of network configuration
:rtype: dict
"""
if os.path.exists(net_topology_file):
net_info = get_yaml_config(net_topology_file)[net_topology]
else:
raise Exception("Network topology file: {} not found."
.format(net_topology_file))
if not ignore_env_vars:
logging.info("Consuming network environment variables as overrides "
"for the undercloud.")
net_info.update(get_undercloud_env_vars())
logging.info("Network info: {}".format(dict_to_yaml(net_info)))
return net_info
def get_unit_hostnames(units, fqdn=False):
"""Return a dict of juju unit names to hostnames."""
host_names = {}
for unit in units:
cmd = 'hostname'
if fqdn:
cmd = cmd + ' -f'
output = model.run_on_unit(unit.entity_id, cmd)
hostname = output['Stdout'].strip()
host_names[unit.entity_id] = hostname
return host_names
def get_pkg_version(application, pkg, model_name=None):
"""Return package version.
:param application: Application name
:type application: string
:param pkg: Package name
:type pkg: string
:param model_name: Name of model to query.
:type model_name: str
:returns: List of package version
:rtype: list
"""
versions = []
units = model.get_units(application, model_name=model_name)
for unit in units:
cmd = 'dpkg -l | grep {}'.format(pkg)
out = juju_utils.remote_run(unit.entity_id, cmd, model_name=model_name)
versions.append(out.split('\n')[0].split()[2])
if len(set(versions)) != 1:
raise Exception('Unexpected output from pkg version check')
return versions[0]
def get_undercloud_env_vars():
"""Get environment specific undercloud network configuration settings.
Get environment specific undercloud network configuration settings from
environment variables.
For each testing substrate, specific undercloud network configuration
settings should be exported into the environment to enable testing on that
substrate.
Note: *Overcloud* settings should be declared by the test caller and should
not be overridden here.
Return a dictionary compatible with zaza.openstack.configure.network
functions' expected key structure.
Example exported environment variables:
export default_gateway="172.17.107.1"
export external_net_cidr="172.17.107.0/24"
export external_dns="10.5.0.2"
export start_floating_ip="172.17.107.200"
export end_floating_ip="172.17.107.249"
Example o-c-t & uosci non-standard environment variables:
export NET_ID="a705dd0f-5571-4818-8c30-4132cc494668"
export GATEWAY="172.17.107.1"
export CIDR_EXT="172.17.107.0/24"
export NAME_SERVER="10.5.0.2"
export FIP_RANGE="172.17.107.200:172.17.107.249"
:returns: Network environment variables
:rtype: dict
"""
# Handle OSCI environment variables
# Note: TEST_* is the only prefix honored
_vars = {}
_vars['net_id'] = os.environ.get('TEST_NET_ID')
_vars['external_dns'] = os.environ.get('TEST_NAME_SERVER')
_vars['default_gateway'] = os.environ.get('TEST_GATEWAY')
_vars['external_net_cidr'] = os.environ.get('TEST_CIDR_EXT')
# Take FIP_RANGE and create start and end floating ips
_fip_range = os.environ.get('TEST_FIP_RANGE')
if _fip_range is not None and ':' in _fip_range:
_vars['start_floating_ip'] = os.environ.get(
'TEST_FIP_RANGE').split(':')[0]
_vars['end_floating_ip'] = os.environ.get(
'TEST_FIP_RANGE').split(':')[1]
# zaza.openstack.configure.network functions variables still take priority
# for local testing. Override OSCI settings.
_keys = ['default_gateway',
'start_floating_ip',
'end_floating_ip',
'external_dns',
'external_net_cidr']
for _key in _keys:
_val = os.environ.get(_key)
if _val:
_vars[_key] = _val
# Remove keys and items with a None value
for k, v in list(_vars.items()):
if not v:
del _vars[k]
return _vars
def get_yaml_config(config_file):
"""Return configuration from YAML file.
:param config_file: Configuration file name
:type config_file: string
:returns: Dictionary of configuration
:rtype: dict
"""
    # Note: in its original form (get_mojo_config) this would search through
    # mojo stage directories. This version assumes the YAML file is in the pwd.
logging.info('Using config %s' % (config_file))
return yaml.safe_load(open(config_file, 'r').read())
def set_origin(application, origin='openstack-origin', pocket='distro'):
"""Set the configuration option for origin source.
:param application: Name of application to upgrade series
:type application: str
:param origin: The configuration setting variable name for changing origin
source. (openstack-origin or source)
:type origin: str
:param pocket: Origin source cloud pocket.
i.e. 'distro' or 'cloud:xenial-newton'
:type pocket: str
:returns: None
:rtype: None
"""
logging.info("Set origin on {} to {}".format(application, origin))
model.set_application_config(application, {origin: pocket})
async def async_set_origin(application, origin='openstack-origin',
pocket='distro'):
"""Set the configuration option for origin source.
:param application: Name of application to upgrade series
:type application: str
:param origin: The configuration setting variable name for changing origin
source. (openstack-origin or source)
:type origin: str
:param pocket: Origin source cloud pocket.
i.e. 'distro' or 'cloud:xenial-newton'
:type pocket: str
:returns: None
:rtype: None
"""
logging.info("Set origin on {} to {}".format(application, origin))
await model.async_set_application_config(application, {origin: pocket})
def run_via_ssh(unit_name, cmd):
"""Run command on unit via ssh.
For executing commands on units when the juju agent is down.
:param unit_name: Unit Name
:param cmd: Command to execute on remote unit
:type cmd: str
:returns: None
:rtype: None
"""
if "sudo" not in cmd:
cmd = "sudo {}".format(cmd)
cmd = ['juju', 'ssh', unit_name, cmd]
logging.info("Running {} on {}".format(cmd, unit_name))
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
logging.warn("Failed command {} on {}".format(cmd, unit_name))
logging.warn(e)
async def async_run_via_ssh(unit_name, cmd, raise_exceptions=False):
"""Run command on unit via ssh.
For executing commands on units when the juju agent is down.
:param unit_name: Unit Name
:param cmd: Command to execute on remote unit
:type cmd: str
:returns: None
:rtype: None
"""
if "sudo" not in cmd:
# cmd.insert(0, "sudo")
cmd = "sudo {}".format(cmd)
cmd = ['juju', 'ssh', unit_name, cmd]
try:
await check_call(cmd)
except subprocess.CalledProcessError as e:
logging.warn("Failed command {} on {}".format(cmd, unit_name))
logging.warn(e)
if raise_exceptions:
raise e
def check_commands_on_units(commands, units):
"""Check that all commands in a list exit zero on all units in a list.
:param commands: list of bash commands
:param units: list of unit pointers
:returns: None if successful; Failure message otherwise
"""
logging.debug('Checking exit codes for {} commands on {} '
'units...'.format(len(commands),
len(units)))
for u in units:
for cmd in commands:
output = model.run_on_unit(u.entity_id, cmd)
if int(output['Code']) == 0:
logging.debug('{} `{}` returned {} '
'(OK)'.format(u.entity_id,
cmd, output['Code']))
else:
return ('{} `{}` returned {} '
'{}'.format(u.entity_id,
cmd, output['Code'], output))
return None
def reboot(unit_name):
"""Reboot unit.
:param unit_name: Unit Name
:type unit_name: str
:returns: None
:rtype: None
"""
# NOTE: When used with series upgrade the agent will be down.
# Even juju run will not work
cmd = ['juju', 'ssh', unit_name, 'sudo', 'reboot', '&&', 'exit']
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
logging.info(e)
pass
async def async_reboot(unit_name):
"""Reboot unit.
:param unit_name: Unit Name
:type unit_name: str
:returns: None
:rtype: None
"""
# NOTE: When used with series upgrade the agent will be down.
# Even juju run will not work
await async_run_via_ssh(unit_name, "sudo reboot && exit")
async def check_call(cmd):
"""Asynchronous function to check a subprocess call.
:param cmd: Command to execute
:type cmd: List[str]
:returns: None
:rtype: None
"""
proc = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
if proc.returncode != 0:
logging.warn("STDOUT: {}".format(stdout))
logging.warn("STDERR: {}".format(stderr))
raise subprocess.CalledProcessError(proc.returncode, cmd)
else:
if stderr:
logging.info("STDERR: {} ({})".format(stderr, ' '.join(cmd)))
if stdout:
logging.info("STDOUT: {} ({})".format(stdout, ' '.join(cmd)))
def set_dpkg_non_interactive_on_unit(
unit_name, apt_conf_d="/etc/apt/apt.conf.d/50unattended-upgrades"):
"""Set dpkg options on unit.
:param unit_name: Unit Name
:type unit_name: str
:param apt_conf_d: Apt.conf file to update
:type apt_conf_d: str
"""
DPKG_NON_INTERACTIVE = 'DPkg::options { "--force-confdef"; };'
# Check if the option exists. If not, add it to the apt.conf.d file
cmd = ("grep '{option}' {file_name} || echo '{option}' >> {file_name}"
.format(option=DPKG_NON_INTERACTIVE, file_name=apt_conf_d))
model.run_on_unit(unit_name, cmd)
async def async_set_dpkg_non_interactive_on_unit(
unit_name, apt_conf_d="/etc/apt/apt.conf.d/50unattended-upgrades"):
"""Set dpkg options on unit.
:param unit_name: Unit Name
:type unit_name: str
:param apt_conf_d: Apt.conf file to update
:type apt_conf_d: str
"""
DPKG_NON_INTERACTIVE = 'DPkg::options { "--force-confdef"; };'
# Check if the option exists. If not, add it to the apt.conf.d file
cmd = ("grep '{option}' {file_name} || echo '{option}' >> {file_name}"
.format(option=DPKG_NON_INTERACTIVE, file_name=apt_conf_d))
await model.async_run_on_unit(unit_name, cmd)
def get_process_id_list(unit_name, process_name,
expect_success=True):
"""Get a list of process ID(s).
Get a list of process ID(s) from a single sentry juju unit
for a single process name.
:param unit_name: Amulet sentry instance (juju unit)
:param process_name: Process name
:param expect_success: If False, expect the PID to be missing,
raise if it is present.
:returns: List of process IDs
:raises: zaza_exceptions.ProcessIdsFailed
"""
cmd = 'pidof -x "{}"'.format(process_name)
if not expect_success:
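        # Invert the exit status: succeed when pidof finds nothing, fail when it does.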
cmd += " || exit 0 && exit 1"
results = model.run_on_unit(unit_name=unit_name, command=cmd)
code = results.get("Code", 1)
try:
code = int(code)
except ValueError:
code = 1
error = results.get("Stderr")
output = results.get("Stdout")
if code != 0:
msg = ('{} `{}` returned {} '
'{} with error {}'.format(unit_name, cmd, code, output, error))
raise zaza_exceptions.ProcessIdsFailed(msg)
return str(output).split()
def get_unit_process_ids(unit_processes, expect_success=True):
"""Get unit process ID(s).
Construct a dict containing unit sentries, process names, and
process IDs.
:param unit_processes: A dictionary of unit names
to list of process names.
:param expect_success: if False expect the processes to not be
running, raise if they are.
:returns: Dictionary of unit names to dictionary
of process names to PIDs.
:raises: zaza_exceptions.ProcessIdsFailed
"""
pid_dict = {}
for unit_name, process_list in unit_processes.items():
pid_dict[unit_name] = {}
for process in process_list:
pids = get_process_id_list(
unit_name, process, expect_success=expect_success)
pid_dict[unit_name].update({process: pids})
return pid_dict
def validate_unit_process_ids(expected, actual):
"""Validate process id quantities for services on units.
:returns: True if the PIDs are validated, raises an exception
if it is not the case.
:raises: zaza_exceptions.UnitCountMismatch
:raises: zaza_exceptions.UnitNotFound
:raises: zaza_exceptions.ProcessNameCountMismatch
:raises: zaza_exceptions.ProcessNameMismatch
:raises: zaza_exceptions.PIDCountMismatch
"""
logging.debug('Checking units for running processes...')
logging.debug('Expected PIDs: {}'.format(expected))
logging.debug('Actual PIDs: {}'.format(actual))
if len(actual) != len(expected):
msg = ('Unit count mismatch. expected, actual: {}, '
'{} '.format(len(expected), len(actual)))
raise zaza_exceptions.UnitCountMismatch(msg)
for (e_unit_name, e_proc_names) in expected.items():
if e_unit_name in actual.keys():
a_proc_names = actual[e_unit_name]
else:
msg = ('Expected unit ({}) not found in actual dict data.'.
format(e_unit_name))
raise zaza_exceptions.UnitNotFound(msg)
if len(e_proc_names.keys()) != len(a_proc_names.keys()):
msg = ('Process name count mismatch. expected, actual: {}, '
'{}'.format(len(expected), len(actual)))
raise zaza_exceptions.ProcessNameCountMismatch(msg)
for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
zip(e_proc_names.items(), a_proc_names.items()):
if e_proc_name != a_proc_name:
msg = ('Process name mismatch. expected, actual: {}, '
'{}'.format(e_proc_name, a_proc_name))
raise zaza_exceptions.ProcessNameMismatch(msg)
a_pids_length = len(a_pids)
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
'{}, {} ({})'.format(e_unit_name, e_proc_name,
e_pids, a_pids_length,
a_pids))
# If expected is a list, ensure at least one PID quantity match
if isinstance(e_pids, list) and \
a_pids_length not in e_pids:
raise zaza_exceptions.PIDCountMismatch(fail_msg)
# If expected is not bool and not list,
# ensure PID quantities match
elif not isinstance(e_pids, bool) and \
not isinstance(e_pids, list) and \
a_pids_length != e_pids:
raise zaza_exceptions.PIDCountMismatch(fail_msg)
# If expected is bool True, ensure 1 or more PIDs exist
elif isinstance(e_pids, bool) and \
e_pids is True and a_pids_length < 1:
raise zaza_exceptions.PIDCountMismatch(fail_msg)
# If expected is bool False, ensure 0 PIDs exist
elif isinstance(e_pids, bool) and \
e_pids is False and a_pids_length != 0:
raise zaza_exceptions.PIDCountMismatch(fail_msg)
else:
logging.debug('PID check OK: {} {} {}: '
'{}'.format(e_unit_name, e_proc_name,
e_pids, a_pids))
return True
def get_ubuntu_release(ubuntu_name):
"""Get index of Ubuntu release.
Returns the index of the name of the Ubuntu release in
UBUNTU_OPENSTACK_RELEASE.
:param ubuntu_name: Name of the Ubuntu release.
:type ubuntu_name: string
:returns: Index of the Ubuntu release
:rtype: integer
:raises: zaza_exceptions.UbuntuReleaseNotFound
"""
ubuntu_releases = list(UBUNTU_OPENSTACK_RELEASE.keys())
try:
index = ubuntu_releases.index(ubuntu_name)
except ValueError:
msg = ('Could not find Ubuntu release {} in {}'.
format(ubuntu_name, UBUNTU_OPENSTACK_RELEASE))
raise zaza_exceptions.UbuntuReleaseNotFound(msg)
return index
def get_file_contents(unit, f):
"""Get contents of a file on a remote unit."""
return model.run_on_unit(unit.entity_id,
"cat {}".format(f))['Stdout']
def is_port_open(port, address):
"""Determine if TCP port is accessible.
Attempt a TCP connection to the given port on the given address.
:param port: Port number
:type port: str
:param address: IP address
:type address: str
:returns: True if port is reachable
:rtype: boolean
"""
try:
telnetlib.Telnet(address, port)
return True
except socket.error as e:
if e.errno == 113:
logging.error("could not connect to {}:{}"
.format(address, port))
if e.errno == 111:
logging.error("connection refused connecting"
" to {}:{}".format(address, port))
return False
def port_knock_units(units, port=22, expect_success=True):
"""Check if specific port is open on units.
Open a TCP socket to check for a listening service on each listed juju unit.
:param units: list of unit pointers
:param port: TCP port number, default to 22
:param expect_success: True by default, set False to invert logic
:returns: None if successful, Failure message otherwise
"""
for u in units:
host = u.public_address
connected = is_port_open(port, host)
if not connected and expect_success:
return 'Socket connect failed.'
elif connected and not expect_success:
return 'Socket connected unexpectedly.'
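# Editorial usage sketch (hypothetical application name); verifies that SSH is
# listening on every unit of an application in the current model.
def _example_port_knock_ssh():
    units = model.get_units('keystone')
    assert port_knock_units(units, port=22) is None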
def get_series(unit):
"""Ubuntu release name running on unit."""
result = model.run_on_unit(unit.entity_id,
"lsb_release -cs")
return result['Stdout'].strip()
def systemctl(unit, service, command="restart"):
"""Run systemctl command on a unit.
:param unit: Unit object or unit name
:type unit: Union[Unit,string]
:param service: Name of service to act on
:type service: string
:param command: Name of command. i.e. start, stop, restart
:type command: string
:raises: AssertionError if the command is unsuccessful
:returns: None if successful
"""
cmd = "/bin/systemctl {} {}".format(command, service)
# Check if this is a unit object or string name of a unit
try:
unit.entity_id
except AttributeError:
unit = model.get_unit_from_name(unit)
result = model.run_on_unit(
unit.entity_id, cmd)
assert int(result['Code']) == 0, (
"{} of {} on {} failed".format(command, service, unit.entity_id))
def get_mojo_cacert_path():
"""Retrieve cacert from Mojo storage location.
:returns: Path to cacert
:rtype: str
:raises: zaza_exceptions.CACERTNotFound
"""
try:
cert_dir = os.environ['MOJO_LOCAL_DIR']
except KeyError:
raise zaza_exceptions.CACERTNotFound(
"Could not find cacert.pem, MOJO_LOCAL_DIR unset")
cacert = os.path.join(cert_dir, 'cacert.pem')
if os.path.exists(cacert):
return cacert
else:
raise zaza_exceptions.CACERTNotFound("Could not find cacert.pem")
def attach_file_resource(application_name, resource_name,
file_content, file_suffix=".txt"):
"""Attaches a file as a Juju resource given the file content and suffix.
The file content will be written into a temporary file with the given
suffix, and it will be attached to the Juju application.
:param application_name: Juju application name.
:type application_name: string
:param resource_name: Juju resource name.
:type resource_name: string
:param file_content: The content of the file that will be attached
:type file_content: string
:param file_suffix: File suffix. This should be used to set the file
extension for applications that are sensitive to this.
:type file_suffix: string
:returns: None
"""
with tempfile.NamedTemporaryFile(mode='w', suffix=file_suffix) as fp:
fp.write(file_content)
fp.flush()
model.attach_resource(
application_name, resource_name, fp.name)
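# Editorial usage sketch (hypothetical application, resource name and content);
# the temporary file suffix matters for charms that check the extension.
def _example_attach_policy_resource():
    attach_file_resource('vault', 'policy', '{"rules": []}',
                         file_suffix='.json')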
| 34.523272 | 79 | 0.642317 |
93631f15fac62212612113e64d74692065984b44 | 1,960 | py | Python | config/wsgi.py | zubbyik/git-cookie | 3cee273877130cb37983ac9ae9bb9ca00601bf3f | ["BSD-3-Clause"] | null | null | null | config/wsgi.py | zubbyik/git-cookie | 3cee273877130cb37983ac9ae9bb9ca00601bf3f | ["BSD-3-Clause"] | null | null | null | config/wsgi.py | zubbyik/git-cookie | 3cee273877130cb37983ac9ae9bb9ca00601bf3f | ["BSD-3-Clause"] | null | null | null |
"""
WSGI config for cookie_site project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# cookie_site directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'cookie_site'))
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 45.581395 | 79 | 0.796939 |
8c677bdc337d41a8f3c51f25a8180f3b05fcff3b | 27,086 | py | Python | sympy/geometry/plane.py | yupbank/sympy | 66d7aef9dc1b26055af22e27ba42004c40b95d7c | ["BSD-3-Clause"] | 1 | 2021-11-19T03:38:42.000Z | 2021-11-19T03:38:42.000Z | sympy/geometry/plane.py | yupbank/sympy | 66d7aef9dc1b26055af22e27ba42004c40b95d7c | ["BSD-3-Clause"] | 3 | 2022-02-04T14:45:16.000Z | 2022-02-04T14:45:45.000Z | sympy/geometry/plane.py | yupbank/sympy | 66d7aef9dc1b26055af22e27ba42004c40b95d7c | ["BSD-3-Clause"] | null | null | null |
"""Geometrical Planes.
Contains
========
Plane
"""
from sympy.core import Dummy, Rational, S, Symbol
from sympy.core.symbol import _symbol
from sympy.functions.elementary.trigonometric import cos, sin, acos, asin, sqrt
from .entity import GeometryEntity
from .line import (Line, Ray, Segment, Line3D, LinearEntity, LinearEntity3D,
Ray3D, Segment3D)
from .point import Point, Point3D
from sympy.matrices import Matrix
from sympy.polys.polytools import cancel
from sympy.solvers import solve, linsolve
from sympy.utilities.iterables import uniq, is_sequence
from sympy.utilities.misc import filldedent, func_name, Undecidable
from mpmath.libmp.libmpf import prec_to_dps
import random
class Plane(GeometryEntity):
"""
A plane is a flat, two-dimensional surface. A plane is the two-dimensional
analogue of a point (zero-dimensions), a line (one-dimension) and a solid
(three-dimensions). A plane can generally be constructed by two types of
inputs. They are three non-collinear points and a point and the plane's
normal vector.
Attributes
==========
p1
normal_vector
Examples
========
>>> from sympy import Plane, Point3D
>>> Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
Plane(Point3D(1, 1, 1), (-1, 2, -1))
>>> Plane((1, 1, 1), (2, 3, 4), (2, 2, 2))
Plane(Point3D(1, 1, 1), (-1, 2, -1))
>>> Plane(Point3D(1, 1, 1), normal_vector=(1,4,7))
Plane(Point3D(1, 1, 1), (1, 4, 7))
"""
def __new__(cls, p1, a=None, b=None, **kwargs):
p1 = Point3D(p1, dim=3)
if a and b:
p2 = Point(a, dim=3)
p3 = Point(b, dim=3)
if Point3D.are_collinear(p1, p2, p3):
raise ValueError('Enter three non-collinear points')
a = p1.direction_ratio(p2)
b = p1.direction_ratio(p3)
normal_vector = tuple(Matrix(a).cross(Matrix(b)))
else:
a = kwargs.pop('normal_vector', a)
evaluate = kwargs.get('evaluate', True)
if is_sequence(a) and len(a) == 3:
normal_vector = Point3D(a).args if evaluate else a
else:
raise ValueError(filldedent('''
Either provide 3 3D points or a point with a
normal vector expressed as a sequence of length 3'''))
if all(coord.is_zero for coord in normal_vector):
raise ValueError('Normal vector cannot be zero vector')
return GeometryEntity.__new__(cls, p1, normal_vector, **kwargs)
def __contains__(self, o):
x, y, z = map(Dummy, 'xyz')
k = self.equation(x, y, z)
if isinstance(o, (LinearEntity, LinearEntity3D)):
t = Dummy()
d = Point3D(o.arbitrary_point(t))
e = k.subs([(x, d.x), (y, d.y), (z, d.z)])
return e.equals(0)
try:
o = Point(o, dim=3, strict=True)
d = k.xreplace(dict(zip((x, y, z), o.args)))
return d.equals(0)
except TypeError:
return False
def _eval_evalf(self, prec=15, **options):
pt, tup = self.args
dps = prec_to_dps(prec)
pt = pt.evalf(n=dps, **options)
tup = tuple([i.evalf(n=dps, **options) for i in tup])
return self.func(pt, normal_vector=tup, evaluate=False)
def angle_between(self, o):
"""Angle between the plane and other geometric entity.
Parameters
==========
LinearEntity3D, Plane.
Returns
=======
angle : angle in radians
Notes
=====
This method accepts only 3D entities as its parameter, but if you want
to calculate the angle between a 2D entity and a plane you should
first convert to a 3D entity by projecting onto a desired plane and
then proceed to calculate the angle.
Examples
========
>>> from sympy import Point3D, Line3D, Plane
>>> a = Plane(Point3D(1, 2, 2), normal_vector=(1, 2, 3))
>>> b = Line3D(Point3D(1, 3, 4), Point3D(2, 2, 2))
>>> a.angle_between(b)
-asin(sqrt(21)/6)
"""
if isinstance(o, LinearEntity3D):
a = Matrix(self.normal_vector)
b = Matrix(o.direction_ratio)
c = a.dot(b)
d = sqrt(sum([i**2 for i in self.normal_vector]))
e = sqrt(sum([i**2 for i in o.direction_ratio]))
return asin(c/(d*e))
if isinstance(o, Plane):
a = Matrix(self.normal_vector)
b = Matrix(o.normal_vector)
c = a.dot(b)
d = sqrt(sum([i**2 for i in self.normal_vector]))
e = sqrt(sum([i**2 for i in o.normal_vector]))
return acos(c/(d*e))
def arbitrary_point(self, u=None, v=None):
""" Returns an arbitrary point on the Plane. If given two
parameters, the point ranges over the entire plane. If given 1
or no parameters, returns a point with one parameter which,
when varying from 0 to 2*pi, moves the point in a circle of
radius 1 about p1 of the Plane.
Examples
========
>>> from sympy import Plane, Ray
>>> from sympy.abc import u, v, t, r
>>> p = Plane((1, 1, 1), normal_vector=(1, 0, 0))
>>> p.arbitrary_point(u, v)
Point3D(1, u + 1, v + 1)
>>> p.arbitrary_point(t)
Point3D(1, cos(t) + 1, sin(t) + 1)
While arbitrary values of u and v can move the point anywhere in
the plane, the single-parameter point can be used to construct a
ray whose arbitrary point can be located at angle t and radius
r from p.p1:
>>> Ray(p.p1, _).arbitrary_point(r)
Point3D(1, r*cos(t) + 1, r*sin(t) + 1)
Returns
=======
Point3D
"""
circle = v is None
if circle:
u = _symbol(u or 't', real=True)
else:
u = _symbol(u or 'u', real=True)
v = _symbol(v or 'v', real=True)
x, y, z = self.normal_vector
a, b, c = self.p1.args
# x1, y1, z1 is a nonzero vector parallel to the plane
if x.is_zero and y.is_zero:
x1, y1, z1 = S.One, S.Zero, S.Zero
else:
x1, y1, z1 = -y, x, S.Zero
# x2, y2, z2 is also parallel to the plane, and orthogonal to x1, y1, z1
x2, y2, z2 = tuple(Matrix((x, y, z)).cross(Matrix((x1, y1, z1))))
if circle:
x1, y1, z1 = (w/sqrt(x1**2 + y1**2 + z1**2) for w in (x1, y1, z1))
x2, y2, z2 = (w/sqrt(x2**2 + y2**2 + z2**2) for w in (x2, y2, z2))
p = Point3D(a + x1*cos(u) + x2*sin(u), \
b + y1*cos(u) + y2*sin(u), \
c + z1*cos(u) + z2*sin(u))
else:
p = Point3D(a + x1*u + x2*v, b + y1*u + y2*v, c + z1*u + z2*v)
return p
@staticmethod
def are_concurrent(*planes):
"""Is a sequence of Planes concurrent?
Two or more Planes are concurrent if their intersections
are a common line.
Parameters
==========
planes: list
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(5, 0, 0), normal_vector=(1, -1, 1))
>>> b = Plane(Point3D(0, -2, 0), normal_vector=(3, 1, 1))
>>> c = Plane(Point3D(0, -1, 0), normal_vector=(5, -1, 9))
>>> Plane.are_concurrent(a, b)
True
>>> Plane.are_concurrent(a, b, c)
False
"""
planes = list(uniq(planes))
for i in planes:
if not isinstance(i, Plane):
raise ValueError('All objects should be Planes but got %s' % i.func)
if len(planes) < 2:
return False
planes = list(planes)
first = planes.pop(0)
sol = first.intersection(planes[0])
if sol == []:
return False
else:
line = sol[0]
for i in planes[1:]:
l = first.intersection(i)
if not l or l[0] not in line:
return False
return True
def distance(self, o):
"""Distance between the plane and another geometric entity.
Parameters
==========
Point3D, LinearEntity3D, Plane.
Returns
=======
distance
Notes
=====
This method accepts only 3D entities as its parameter, but if you want
to calculate the distance between a 2D entity and a plane you should
first convert to a 3D entity by projecting onto a desired plane and
then proceed to calculate the distance.
Examples
========
>>> from sympy import Point3D, Line3D, Plane
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 1, 1))
>>> b = Point3D(1, 2, 3)
>>> a.distance(b)
sqrt(3)
>>> c = Line3D(Point3D(2, 3, 1), Point3D(1, 2, 2))
>>> a.distance(c)
0
"""
if self.intersection(o) != []:
return S.Zero
if isinstance(o, (Segment3D, Ray3D)):
a, b = o.p1, o.p2
pi, = self.intersection(Line3D(a, b))
if pi in o:
return self.distance(pi)
elif a in Segment3D(pi, b):
return self.distance(a)
else:
assert isinstance(o, Segment3D) is True
return self.distance(b)
# following code handles `Point3D`, `LinearEntity3D`, `Plane`
a = o if isinstance(o, Point3D) else o.p1
n = Point3D(self.normal_vector).unit
d = (a - self.p1).dot(n)
return abs(d)
def equals(self, o):
"""
Returns True if self and o are the same mathematical entities.
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1, 2, 3), normal_vector=(1, 1, 1))
>>> b = Plane(Point3D(1, 2, 3), normal_vector=(2, 2, 2))
>>> c = Plane(Point3D(1, 2, 3), normal_vector=(-1, 4, 6))
>>> a.equals(a)
True
>>> a.equals(b)
True
>>> a.equals(c)
False
"""
if isinstance(o, Plane):
a = self.equation()
b = o.equation()
return cancel(a/b).is_constant()
else:
return False
def equation(self, x=None, y=None, z=None):
"""The equation of the Plane.
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 2), Point3D(2, 4, 7), Point3D(3, 5, 1))
>>> a.equation()
-23*x + 11*y - 2*z + 16
>>> a = Plane(Point3D(1, 4, 2), normal_vector=(6, 6, 6))
>>> a.equation()
6*x + 6*y + 6*z - 42
"""
x, y, z = [i if i else Symbol(j, real=True) for i, j in zip((x, y, z), 'xyz')]
a = Point3D(x, y, z)
b = self.p1.direction_ratio(a)
c = self.normal_vector
return (sum(i*j for i, j in zip(b, c)))
def intersection(self, o):
""" The intersection with other geometrical entity.
Parameters
==========
Point, Point3D, LinearEntity, LinearEntity3D, Plane
Returns
=======
List
Examples
========
>>> from sympy import Point3D, Line3D, Plane
>>> a = Plane(Point3D(1, 2, 3), normal_vector=(1, 1, 1))
>>> b = Point3D(1, 2, 3)
>>> a.intersection(b)
[Point3D(1, 2, 3)]
>>> c = Line3D(Point3D(1, 4, 7), Point3D(2, 2, 2))
>>> a.intersection(c)
[Point3D(2, 2, 2)]
>>> d = Plane(Point3D(6, 0, 0), normal_vector=(2, -5, 3))
>>> e = Plane(Point3D(2, 0, 0), normal_vector=(3, 4, -3))
>>> d.intersection(e)
[Line3D(Point3D(78/23, -24/23, 0), Point3D(147/23, 321/23, 23))]
"""
if not isinstance(o, GeometryEntity):
o = Point(o, dim=3)
if isinstance(o, Point):
if o in self:
return [o]
else:
return []
if isinstance(o, (LinearEntity, LinearEntity3D)):
# recast to 3D
p1, p2 = o.p1, o.p2
if isinstance(o, Segment):
o = Segment3D(p1, p2)
elif isinstance(o, Ray):
o = Ray3D(p1, p2)
elif isinstance(o, Line):
o = Line3D(p1, p2)
else:
raise ValueError('unhandled linear entity: %s' % o.func)
if o in self:
return [o]
else:
t = Dummy() # unnamed else it may clash with a symbol in o
a = Point3D(o.arbitrary_point(t))
p1, n = self.p1, Point3D(self.normal_vector)
# TODO: Replace solve with solveset, when this line is tested
c = solve((a - p1).dot(n), t)
if not c:
return []
else:
c = [i for i in c if i.is_real is not False]
if len(c) > 1:
c = [i for i in c if i.is_real]
if len(c) != 1:
raise Undecidable("not sure which point is real")
p = a.subs(t, c[0])
if p not in o:
return [] # e.g. a segment might not intersect a plane
return [p]
if isinstance(o, Plane):
if self.equals(o):
return [self]
if self.is_parallel(o):
return []
else:
x, y, z = map(Dummy, 'xyz')
a, b = Matrix([self.normal_vector]), Matrix([o.normal_vector])
c = list(a.cross(b))
d = self.equation(x, y, z)
e = o.equation(x, y, z)
result = list(linsolve([d, e], x, y, z))[0]
for i in (x, y, z): result = result.subs(i, 0)
return [Line3D(Point3D(result), direction_ratio=c)]
def is_coplanar(self, o):
""" Returns True if `o` is coplanar with self, else False.
Examples
========
>>> from sympy import Plane
>>> o = (0, 0, 0)
>>> p = Plane(o, (1, 1, 1))
>>> p2 = Plane(o, (2, 2, 2))
>>> p == p2
False
>>> p.is_coplanar(p2)
True
"""
if isinstance(o, Plane):
x, y, z = map(Dummy, 'xyz')
return not cancel(self.equation(x, y, z)/o.equation(x, y, z)).has(x, y, z)
if isinstance(o, Point3D):
return o in self
elif isinstance(o, LinearEntity3D):
return all(i in self for i in self)
elif isinstance(o, GeometryEntity): # XXX should only be handling 2D objects now
return all(i == 0 for i in self.normal_vector[:2])
def is_parallel(self, l):
"""Is the given geometric entity parallel to the plane?
Parameters
==========
LinearEntity3D or Plane
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> b = Plane(Point3D(3,1,3), normal_vector=(4, 8, 12))
>>> a.is_parallel(b)
True
"""
if isinstance(l, LinearEntity3D):
a = l.direction_ratio
b = self.normal_vector
c = sum([i*j for i, j in zip(a, b)])
if c == 0:
return True
else:
return False
elif isinstance(l, Plane):
a = Matrix(l.normal_vector)
b = Matrix(self.normal_vector)
if a.cross(b).is_zero_matrix:
return True
else:
return False
def is_perpendicular(self, l):
"""is the given geometric entity perpendicualar to the given plane?
Parameters
==========
LinearEntity3D or Plane
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> b = Plane(Point3D(2, 2, 2), normal_vector=(-1, 2, -1))
>>> a.is_perpendicular(b)
True
"""
if isinstance(l, LinearEntity3D):
a = Matrix(l.direction_ratio)
b = Matrix(self.normal_vector)
if a.cross(b).is_zero_matrix:
return True
else:
return False
elif isinstance(l, Plane):
a = Matrix(l.normal_vector)
b = Matrix(self.normal_vector)
if a.dot(b) == 0:
return True
else:
return False
else:
return False
@property
def normal_vector(self):
"""Normal vector of the given plane.
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
>>> a.normal_vector
(-1, 2, -1)
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 4, 7))
>>> a.normal_vector
(1, 4, 7)
"""
return self.args[1]
@property
def p1(self):
"""The only defining point of the plane. Others can be obtained from the
arbitrary_point method.
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
>>> a.p1
Point3D(1, 1, 1)
"""
return self.args[0]
def parallel_plane(self, pt):
"""
Plane parallel to the given plane and passing through the point pt.
Parameters
==========
pt: Point3D
Returns
=======
Plane
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1, 4, 6), normal_vector=(2, 4, 6))
>>> a.parallel_plane(Point3D(2, 3, 5))
Plane(Point3D(2, 3, 5), (2, 4, 6))
"""
a = self.normal_vector
return Plane(pt, normal_vector=a)
def perpendicular_line(self, pt):
"""A line perpendicular to the given plane.
Parameters
==========
pt: Point3D
Returns
=======
Line3D
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> a.perpendicular_line(Point3D(9, 8, 7))
Line3D(Point3D(9, 8, 7), Point3D(11, 12, 13))
"""
a = self.normal_vector
return Line3D(pt, direction_ratio=a)
def perpendicular_plane(self, *pts):
"""
Return a perpendicular plane passing through the given points. If the
direction ratio between the points is the same as the Plane's normal
vector then, to select from the infinite number of possible planes,
a third point will be chosen on the z-axis (or the y-axis
if the normal vector is already parallel to the z-axis). If less than
two points are given they will be supplied as follows: if no point is
given then pt1 will be self.p1; if a second point is not given it will
be a point through pt1 on a line parallel to the z-axis (if the normal
is not already the z-axis, otherwise on the line parallel to the
y-axis).
Parameters
==========
pts: 0, 1 or 2 Point3D
Returns
=======
Plane
Examples
========
>>> from sympy import Plane, Point3D
>>> a, b = Point3D(0, 0, 0), Point3D(0, 1, 0)
>>> Z = (0, 0, 1)
>>> p = Plane(a, normal_vector=Z)
>>> p.perpendicular_plane(a, b)
Plane(Point3D(0, 0, 0), (1, 0, 0))
"""
if len(pts) > 2:
raise ValueError('No more than 2 pts should be provided.')
pts = list(pts)
if len(pts) == 0:
pts.append(self.p1)
if len(pts) == 1:
x, y, z = self.normal_vector
if x == y == 0:
dir = (0, 1, 0)
else:
dir = (0, 0, 1)
pts.append(pts[0] + Point3D(*dir))
p1, p2 = [Point(i, dim=3) for i in pts]
l = Line3D(p1, p2)
n = Line3D(p1, direction_ratio=self.normal_vector)
if l in n: # XXX should an error be raised instead?
# there are infinitely many perpendicular planes;
x, y, z = self.normal_vector
if x == y == 0:
# the z axis is the normal so pick a pt on the y-axis
p3 = Point3D(0, 1, 0) # case 1
else:
# else pick a pt on the z axis
p3 = Point3D(0, 0, 1) # case 2
# in case that point is already given, move it a bit
if p3 in l:
p3 *= 2 # case 3
else:
p3 = p1 + Point3D(*self.normal_vector) # case 4
return Plane(p1, p2, p3)
def projection_line(self, line):
"""Project the given line onto the plane through the normal plane
containing the line.
Parameters
==========
LinearEntity or LinearEntity3D
Returns
=======
Point3D, Line3D, Ray3D or Segment3D
Notes
=====
For the interaction between 2D and 3D lines (segments, rays), you should
convert the line to 3D by using this method. For example for finding the
intersection between a 2D and a 3D line, convert the 2D line to a 3D line
by projecting it on a required plane and then proceed to find the
intersection between those lines.
Examples
========
>>> from sympy import Plane, Line, Line3D, Point3D
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 1, 1))
>>> b = Line(Point3D(1, 1), Point3D(2, 2))
>>> a.projection_line(b)
Line3D(Point3D(4/3, 4/3, 1/3), Point3D(5/3, 5/3, -1/3))
>>> c = Line3D(Point3D(1, 1, 1), Point3D(2, 2, 2))
>>> a.projection_line(c)
Point3D(1, 1, 1)
"""
if not isinstance(line, (LinearEntity, LinearEntity3D)):
raise NotImplementedError('Enter a linear entity only')
a, b = self.projection(line.p1), self.projection(line.p2)
if a == b:
# projection does not imply intersection so for
# this case (line parallel to plane's normal) we
# return the projection point
return a
if isinstance(line, (Line, Line3D)):
return Line3D(a, b)
if isinstance(line, (Ray, Ray3D)):
return Ray3D(a, b)
if isinstance(line, (Segment, Segment3D)):
return Segment3D(a, b)
def projection(self, pt):
"""Project the given point onto the plane along the plane normal.
Parameters
==========
Point or Point3D
Returns
=======
Point3D
Examples
========
>>> from sympy import Plane, Point3D
>>> A = Plane(Point3D(1, 1, 2), normal_vector=(1, 1, 1))
The projection is along the normal vector direction, not the z
axis, so (1, 1) does not project to (1, 1, 2) on the plane A:
>>> b = Point3D(1, 1)
>>> A.projection(b)
Point3D(5/3, 5/3, 2/3)
>>> _ in A
True
But the point (1, 1, 2) projects to (1, 1) on the XY-plane:
>>> XY = Plane((0, 0, 0), (0, 0, 1))
>>> XY.projection((1, 1, 2))
Point3D(1, 1, 0)
"""
rv = Point(pt, dim=3)
if rv in self:
return rv
return self.intersection(Line3D(rv, rv + Point3D(self.normal_vector)))[0]
def random_point(self, seed=None):
""" Returns a random point on the Plane.
Returns
=======
Point3D
Examples
========
>>> from sympy import Plane
>>> p = Plane((1, 0, 0), normal_vector=(0, 1, 0))
>>> r = p.random_point(seed=42) # seed value is optional
>>> r.n(3)
Point3D(2.29, 0, -1.35)
The random point can be moved to lie on the circle of radius
1 centered on p1:
>>> c = p.p1 + (r - p.p1).unit
>>> c.distance(p.p1).equals(1)
True
"""
if seed is not None:
rng = random.Random(seed)
else:
rng = random
u, v = Dummy('u'), Dummy('v')
params = {
u: 2*Rational(rng.gauss(0, 1)) - 1,
v: 2*Rational(rng.gauss(0, 1)) - 1}
return self.arbitrary_point(u, v).subs(params)
def parameter_value(self, other, u, v=None):
"""Return the parameter(s) corresponding to the given point.
Examples
========
>>> from sympy import pi, Plane
>>> from sympy.abc import t, u, v
>>> p = Plane((2, 0, 0), (0, 0, 1), (0, 1, 0))
By default, the parameter value returned defines a point
that is a distance of 1 from the Plane's p1 value and
in line with the given point:
>>> on_circle = p.arbitrary_point(t).subs(t, pi/4)
>>> on_circle.distance(p.p1)
1
>>> p.parameter_value(on_circle, t)
{t: pi/4}
Moving the point twice as far from p1 does not change
the parameter value:
>>> off_circle = p.p1 + (on_circle - p.p1)*2
>>> off_circle.distance(p.p1)
2
>>> p.parameter_value(off_circle, t)
{t: pi/4}
If the 2-value parameter is desired, supply the two
parameter symbols and a replacement dictionary will
be returned:
>>> p.parameter_value(on_circle, u, v)
{u: sqrt(10)/10, v: sqrt(10)/30}
>>> p.parameter_value(off_circle, u, v)
{u: sqrt(10)/5, v: sqrt(10)/15}
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if not isinstance(other, Point):
raise ValueError("other must be a point")
if other == self.p1:
return other
if isinstance(u, Symbol) and v is None:
delta = self.arbitrary_point(u) - self.p1
eq = delta - (other - self.p1).unit
sol = solve(eq, u, dict=True)
elif isinstance(u, Symbol) and isinstance(v, Symbol):
pt = self.arbitrary_point(u, v)
sol = solve(pt - other, (u, v), dict=True)
else:
raise ValueError('expecting 1 or 2 symbols')
if not sol:
raise ValueError("Given point is not on %s" % func_name(self))
return sol[0] # {t: tval} or {u: uval, v: vval}
@property
def ambient_dimension(self):
return self.p1.ambient_dimension
| 30.53664 | 89 | 0.507716 |
2eda4baa616d44ac3c9449539453b03caff1b9d8 | 17,996 | py | Python | src/Img/filters.py | souji-28/puzzle-python | c4ea07041e7fa617ebfe2c1cbf117eb345c7ce1e | ["MIT"] | 60 | 2018-10-13T01:09:04.000Z | 2022-03-19T18:44:17.000Z | src/python/Img/filters.py | bertrand-sifre/Zolver | b98bb7899b459ce4efef72b3abedc453005a0a25 | ["MIT"] | 5 | 2019-01-21T15:00:18.000Z | 2022-02-18T14:24:28.000Z | src/python/Img/filters.py | bertrand-sifre/Zolver | b98bb7899b459ce4efef72b3abedc453005a0a25 | ["MIT"] | 15 | 2019-10-02T04:44:36.000Z | 2021-12-24T20:52:03.000Z |
import colorsys
from colorsys import rgb_to_hls
import cv2
import numpy as np
import math, pickle, os
from Img.Pixel import Pixel, flatten_colors
from Puzzle.Edge import Edge
from Puzzle.Enums import directions, TypeEdge
from Puzzle.PuzzlePiece import PuzzlePiece
import matplotlib.pyplot as plt
import matplotlib
import scipy, sklearn.preprocessing
import itertools
from Img.peak_detect import *
COUNT = 0
def get_relative_angles(cnt, export=False, sigma=5):
"""
Get the relative angles between consecutive points of a contour, taken 2 by 2
:param cnt: contour to analyze
:param export: export of the signature with pickle and figure
:param sigma: coefficient used in gaussian filter (the higher the smoother)
:type cnt: list of tuple of points
:return: list of angles
"""
global COUNT
COUNT = COUNT + 1
length = len(cnt)
angles = []
last = np.pi
cnt_tmp = np.array(cnt)
cnt = np.append(cnt, cnt_tmp, axis=0)
cnt = np.append(cnt, cnt_tmp, axis=0)
for i in range(0, len(cnt) - 1):
dir = (cnt[i + 1][0] - cnt[i][0], cnt[i + 1][1] - cnt[i][1])
angle = math.atan2(-dir[1], dir[0])
while (angle < last - np.pi):
angle += 2 * np.pi
while (angle > last + np.pi):
angle -= 2 * np.pi
angles.append(angle)
last = angle
angles = np.diff(angles)
k = [0.33,0.33,0.33,0.33,0.33]
angles = scipy.ndimage.convolve(angles, k, mode='constant', cval=0.0)
angles = scipy.ndimage.filters.gaussian_filter(angles, sigma)
angles = np.roll(np.array(angles), -length)
angles = angles[0:length]
if export:
pickle.dump(angles, open("/tmp/save" + str(COUNT) + ".p", "wb"))
plt.plot(np.append(angles, angles))
plt.savefig("/tmp/fig" + str(COUNT) + ".png")
plt.clf()
plt.cla()
plt.close()
return angles
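# Editorial sketch (synthetic contour, not part of the original module): for an
# axis-aligned square traversal the relative-angle signature stays ~0 along the
# straight sides and shows four smoothed peaks at the corners, which is what
# the corner search below relies on.
def _example_square_signature():
    pts = [(x, 0) for x in range(20)] + [(19, y) for y in range(1, 20)] + \
          [(x, 19) for x in range(18, -1, -1)] + [(0, y) for y in range(18, 0, -1)]
    return get_relative_angles(np.array(pts), sigma=2)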
def is_maximum_local(index, relative_angles, radius):
"""
Determine if the point at index is a local maximum within a radius range of the relative_angles function
:param index: index of the point to check in relative_angles list
:param relative_angles: list of angles
:param radius: radius used to check neighbors
:return: Boolean
"""
start = max(0, index - radius)
end = min(relative_angles.shape[0] - 1, index + radius)
for i in range(start, end + 1):
if relative_angles[i] > relative_angles[index]:
return False
return True
def longest_peak(relative_angles):
"""
Find the longest contiguous area where the relative angles are negative (< 0)
:param relative_angles: list of angles
:return: coordinates of the area
"""
length = relative_angles.shape[0]
longest = (0, 0)
j = 0
for i in range(length):
if relative_angles[i] >= 0:
j = i
if i - j > longest[1] - longest[0]:
longest = (j, i)
return longest
def distance_signature(relative_angles):
"""
Distance of each point to the line formed by the first and last points
:param relative_angles: list of angles
:return: List of floats
"""
length = relative_angles.shape[0]
l1 = np.array([0, relative_angles[0]])
l2 = np.array([length - 1, relative_angles[-1]])
signature = np.zeros((length, 1))
for i in range(length):
assert(np.linalg.norm(l2 - l1) != 0)
signature[i] = np.linalg.norm(np.cross(l2 - l1, l1 - np.array([i, relative_angles[i]]))) / np.linalg.norm(l2 - l1)
return signature
def flat_score(relative_angles):
"""
Compute the flat score of relative_angles
:param relative_angles: list of angles
:return: Float score
"""
length = relative_angles.shape[0]
distances = distance_signature(relative_angles)
diff = 0
for i in range(length):
diff = max(diff, abs(distances[i]))
return diff
def indent_score(relative_angles):
"""
Compute score for indent part
:param relative_angles: list of angles
:return: Float score
"""
length = relative_angles.shape[0]
peak = longest_peak(relative_angles)
while peak[0] > 0 and not is_maximum_local(peak[0], relative_angles, 10):
peak = (peak[0] - 1, peak[1])
while peak[1] < length - 1 and not is_maximum_local(peak[1], relative_angles, 10):
peak = (peak[0], peak[1] + 1)
shape = np.zeros((peak[0] + length - peak[1], 1))
for i in range(peak[0] + 1):
shape[i] = relative_angles[i]
for i in range(peak[1], length):
shape[i - peak[1] + peak[0]] = relative_angles[i]
# FIX FOR FUNCTIONS > 0
if shape.shape[0] == 1:
return flat_score(relative_angles)
return flat_score(shape)
def outdent_score(relative_angles):
"""
Compute score for outdent part
:param relative_angles: list of angles
:return: Float score
"""
return indent_score(-relative_angles)
def compute_comp(combs_l, relative_angles, method='correlate'):
"""
Compute a score for each combination of 4 points and return the index of the best
:param combs_l: list of combinations of 4 points
:param relative_angles: List of angles
:param method: scoring method; only 'flat' is handled in this implementation
:return: Int index of the best combination
"""
# Combinations of 4 points
global COUNT
MY_COUNT = 0
results_glob = []
for comb_t in combs_l:
# Roll the values of relative angles for this combination
offset = len(relative_angles) - comb_t[3] - 1
relative_angles_tmp = np.roll(relative_angles, offset)
comb_t += offset
comb_t = [(0, comb_t[0]), (comb_t[0], comb_t[1]), (comb_t[1], comb_t[2]), (comb_t[2], comb_t[3])]
results_comp = []
for comb in comb_t:
hole, head, border = 0, 0, 0
if method == 'flat':
hole = indent_score(np.ravel(np.array(relative_angles_tmp[comb[0]:comb[1]])))
head = outdent_score(np.ravel(np.array(relative_angles_tmp[comb[0]:comb[1]])))
border = flat_score(np.ravel(np.array(relative_angles_tmp[comb[0]:comb[1]])))
if hole != border:
results_comp.append(np.min([hole, head]))
else:
results_comp.append(border)
results_glob.append(np.sum(results_comp))
return np.argmin(np.array(results_glob))
def peaks_inside(comb, peaks):
"""
Collect the peaks that fall inside comb
:param comb: Tuple of coordinates
:param peaks: List of peaks to check
:return: List of the peaks inside comb
"""
cpt = []
if len(comb) == 0:
return cpt
for peak in peaks:
if peak > comb[0] and peak < comb[-1]:
cpt.append(peak)
return cpt
def is_pattern(comb, peaks):
"""
Check if the peaks formed an outdent or an indent pattern
:param comb: Tuple of coordinates
:param peaks: List of peaks
:return: Boolean
"""
cpt = len(peaks_inside(comb, peaks))
return cpt == 0 or cpt == 2 or cpt == 3
def is_acceptable_comb(combs, peaks, length):
"""
Check if a combination is composed of acceptable patterns.
Used to filter the obviously bad combinations quickly.
:param comb: Tuple of coordinates
:param peaks: List of peaks
:param length: Length of the signature (used for offset computation)
:return: Boolean
"""
offset = length - combs[3] - 1
combs_tmp = combs + offset
peaks_tmp = (peaks + offset) % length
return is_pattern([0, combs_tmp[0]], peaks_tmp) and is_pattern([combs_tmp[0], combs_tmp[1]], peaks_tmp) and is_pattern([combs_tmp[1], combs_tmp[2]], peaks_tmp) and is_pattern([combs_tmp[2], combs_tmp[3]], peaks_tmp)
def type_peak(peaks_pos_inside, peaks_neg_inside):
"""
Determine the type of lists of pos and neg peaks
:param peaks_pos_inside: List of positive peaks
:param peaks_neg_inside: List of negative peaks
:return: TypeEdge
"""
if len(peaks_pos_inside) == 0 and len(peaks_neg_inside) == 0:
return TypeEdge.BORDER
if len(peaks_inside(peaks_pos_inside, peaks_neg_inside)) == 2:
return TypeEdge.HOLE
if len(peaks_inside(peaks_neg_inside, peaks_pos_inside)) == 2:
return TypeEdge.HEAD
return TypeEdge.UNDEFINED
def my_find_corner_signature(cnt, green=False):
"""
Determine the corner/edge positions by analyzing contours.
:param cnt: contour to analyze
:param green: boolean used to activate green background mode
:type cnt: list of tuple of points
:return: Corners coordinates, Edges lists of points, type of pieces
"""
edges = []
combs_final = []
types_pieces = []
sigma = 5
max_sigma = 12
if not green:
sigma = 5
max_sigma = 15
while sigma <= max_sigma:
print("Smooth curve with sigma={}...".format(sigma))
tmp_combs_final = []
# Find relative angles
cnt_convert = [c[0] for c in cnt]
relative_angles = get_relative_angles(np.array(cnt_convert), export=False, sigma=sigma)
relative_angles = np.array(relative_angles)
relative_angles_inverse = -np.array(relative_angles)
extr_tmp = detect_peaks(relative_angles, mph=0.3*np.max(relative_angles))
relative_angles = np.roll(relative_angles, int(len(relative_angles) / 2))
extr_tmp = np.append(extr_tmp, (detect_peaks(relative_angles, mph=0.3*max(relative_angles)) - int(len(relative_angles) / 2)) % len(relative_angles), axis=0)
relative_angles = np.roll(relative_angles, -int(len(relative_angles) / 2))
extr_tmp = np.unique(extr_tmp)
extr_tmp_inverse = detect_peaks(relative_angles_inverse, mph=0.3*np.max(relative_angles_inverse))
relative_angles_inverse = np.roll(relative_angles_inverse, int(len(relative_angles_inverse) / 2))
extr_tmp_inverse = np.append(extr_tmp_inverse, (detect_peaks(relative_angles_inverse, mph=0.3*max(relative_angles_inverse)) - int(len(relative_angles_inverse) / 2)) % len(relative_angles_inverse), axis=0)
relative_angles_inverse = np.roll(relative_angles_inverse, -int(len(relative_angles_inverse) / 2))
extr_tmp_inverse = np.unique(extr_tmp_inverse)
extr = extr_tmp
extr_inverse = extr_tmp_inverse
relative_angles = sklearn.preprocessing.normalize(relative_angles[:,np.newaxis], axis=0).ravel()
# Build list of permutations of 4 points
combs = itertools.permutations(extr, 4)
combs_l = list(combs)
OFFSET_LOW = len(relative_angles) / 8
OFFSET_HIGH = len(relative_angles) / 2.0
for icomb, comb in enumerate(combs_l):
if ((comb[0] > comb[1]) and (comb[1] > comb[2]) and (comb[2] > comb[3])
and ((comb[0] - comb[1]) > OFFSET_LOW) and ((comb[0] - comb[1]) < OFFSET_HIGH)
and ((comb[1] - comb[2]) > OFFSET_LOW) and ((comb[1] - comb[2]) < OFFSET_HIGH)
and ((comb[2] - comb[3]) > OFFSET_LOW) and ((comb[2] - comb[3]) < OFFSET_HIGH)
and ((comb[3] + (len(relative_angles) - comb[0])) > OFFSET_LOW) and ((comb[3] + (len(relative_angles) - comb[0])) < OFFSET_HIGH)):
if is_acceptable_comb((comb[3], comb[2], comb[1], comb[0]), extr, len(relative_angles)) and is_acceptable_comb((comb[3], comb[2], comb[1], comb[0]), extr_inverse, len(relative_angles)):
tmp_combs_final.append((comb[3], comb[2], comb[1], comb[0]))
sigma += 1
if len(tmp_combs_final) == 0:
continue
best_fit = tmp_combs_final[compute_comp(tmp_combs_final, relative_angles, method='flat')]
# Roll the values of relative angles for this combination
offset = len(relative_angles) - best_fit[3] - 1
relative_angles = np.roll(relative_angles, offset)
best_fit += offset
extr = (extr + offset) % len(relative_angles)
extr_inverse = (extr_inverse + offset) % len(relative_angles)
tmp_types_pieces = []
no_undefined = True
for best_comb in [[0, best_fit[0]], [best_fit[0], best_fit[1]], [best_fit[1], best_fit[2]], [best_fit[2], best_fit[3]]]:
pos_peaks_inside = peaks_inside(best_comb, extr)
neg_peaks_inside = peaks_inside(best_comb, extr_inverse)
pos_peaks_inside.sort()
neg_peaks_inside.sort()
tmp_types_pieces.append(type_peak(pos_peaks_inside, neg_peaks_inside))
if (tmp_types_pieces[-1] == TypeEdge.UNDEFINED):
no_undefined = False
combs_final = tmp_combs_final
types_pieces = tmp_types_pieces
if no_undefined:
break
if (len(types_pieces) != 0 and types_pieces[-1] == TypeEdge.UNDEFINED):
print("UNDEFINED FOUND - try to continue but something bad happened :(")
print(tmp_types_pieces[-1])
best_fit_tmp = best_fit - offset
for i in range(3):
edges.append(cnt[best_fit_tmp[i]:best_fit_tmp[i + 1]])
edges.append(np.concatenate((cnt[best_fit_tmp[3]:], cnt[:best_fit_tmp[0]]), axis=0))
edges = [np.array([x[0] for x in e]) for e in edges] # quick'n'dirty fix of the shape
types_pieces.append(types_pieces[0])
return best_fit, edges, types_pieces[1:]
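# Editorial note (variable names hypothetical): a typical call on one OpenCV
# contour looks like
#     corners, edges, types = my_find_corner_signature(contour, green=False)
# where `corners` holds the four corner indices, `edges` the four point lists
# and `types` one TypeEdge (BORDER / HOLE / HEAD) per edge.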
def angle_between(v1, v2):
"""
Return the signed angle between two direction vectors
:param v1: first tuple of coordinates
:param v2: second tuple of coordinates
:return: angle in radians (Float)
"""
return math.atan2(-v1[1], v1[0]) - math.atan2(-v2[1], v2[0])
def export_contours(img, img_bw, contours, path, modulo, viewer=None, green=False):
"""
Find the corners/shapes of all contours and build an array of puzzle Pieces
:param img: matrix of the img
:param img_bw: matrix of the img in black and white
:param contours: lists of tuples of coordinates of contours
:param path: Path used to export pieces img
:param viewer: Object used for GUI display
:param green: boolean used to activate green background mode
:return: puzzle Piece array
"""
puzzle_pieces = []
list_img = []
out_color = np.zeros_like(img)
for idx, cnt in enumerate(contours):
corners, edges_shape, types_edges = my_find_corner_signature(cnt, green)
if corners is None:
return None
mask_border = np.zeros_like(img_bw)
mask_full = np.zeros_like(img_bw)
mask_full = cv2.drawContours(mask_full, contours, idx, 255, -1)
mask_border = cv2.drawContours(mask_border, contours, idx, 255, 1)
img_piece = np.zeros_like(img)
img_piece[mask_full == 255] = img[mask_full == 255]
pixels = []
for x, y in tuple(zip(*np.where(mask_full == 255))):
pixels.append(Pixel((x, y), img_piece[x, y]))
color_vect = []
# go faster, use only a subset of the img with the piece
x_bound, y_bound, w_bound, h_bound = cv2.boundingRect(cnt)
img_piece_tiny = img_piece[y_bound:y_bound + h_bound, x_bound:x_bound + w_bound]
mask_border_tiny = mask_border[y_bound:y_bound + h_bound, x_bound:x_bound + w_bound]
mask_full_tiny = mask_full[y_bound:y_bound + h_bound, x_bound:x_bound + w_bound]
mask_around_tiny = np.zeros_like(mask_full_tiny)
mask_inv_border_tiny = cv2.bitwise_not(mask_border_tiny)
mask_full_tiny = cv2.bitwise_and(mask_full_tiny, mask_full_tiny, mask=mask_inv_border_tiny)
for i in range(4):
color_edge = []
for ip, p in enumerate(edges_shape[i]):
CIRCLE_SIZE = 5
if ip != 0:
p2 = edges_shape[i][ip - 1]
cv2.circle(mask_around_tiny, (p2[0] - x_bound, p2[1] - y_bound), CIRCLE_SIZE, 0, -1)
cv2.circle(mask_around_tiny, (p[0] - x_bound, p[1] - y_bound), CIRCLE_SIZE, 255, -1)
mask_around_tiny = cv2.bitwise_and(mask_around_tiny, mask_around_tiny, mask=mask_full_tiny)
neighbors_color = []
for y, x in tuple(zip(*np.where(mask_around_tiny == 255))):
neighbors_color.append(img_piece_tiny[y, x])
rgb = flatten_colors(neighbors_color)
hsl = np.array(colorsys.rgb_to_hls(rgb[2] / 255.0, rgb[1] / 255.0, rgb[0] / 255.0))
color_edge.append(hsl)
out_color[p[1], p[0]] = rgb
color_vect.append(np.array(color_edge))
edges = []
cpt = 0
for s, c in zip(edges_shape, color_vect):
edges.append(Edge(s, c, type=types_edges[cpt]))
cpt += 1
for i, e in enumerate(edges):
e.direction = directions[i]
if e.type == TypeEdge.BORDER:
e.connected = True
puzzle_pieces.append(PuzzlePiece(edges, pixels))
mask_border = np.zeros_like(img_bw)
for i in range(4):
for p in edges_shape[i]:
mask_border[p[1], p[0]] = 255
out = np.zeros_like(img_bw)
out[mask_border == 255] = img_bw[mask_border == 255]
x, y, w, h = cv2.boundingRect(cnt)
out2 = out[y:y + h, x:x + w]
list_img.append(out2)
max_height = max([x.shape[0] for x in list_img])
max_width = max([x.shape[1] for x in list_img])
pieces_img = np.zeros([max_height * (int(len(list_img) / modulo) + 1), max_width * modulo], dtype=np.uint8)
for index, image in enumerate(list_img):
pieces_img[(max_height * int(index / modulo)):(max_height * int(index / modulo) + image.shape[0]),
(max_width * (index % modulo)):(max_width * (index % modulo) + image.shape[1])] = image
cv2.imwrite("/tmp/color_border.png", out_color)
cv2.imwrite(path, pieces_img)
if viewer:
viewer.addImage("Extracted colored border", "/tmp/color_border.png")
return puzzle_pieces
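# Editorial usage sketch (hypothetical paths; assumes OpenCV >= 4, where
# findContours returns two values): threshold an image, take the external
# contours at full resolution, then build the PuzzlePiece list.
def _example_export_pieces(path_in='/tmp/puzzle.png'):
    img = cv2.imread(path_in)
    img_bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_bw, 127, 255, cv2.THRESH_BINARY_INV)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
    return export_contours(img, img_bw, contours, '/tmp/pieces.png', modulo=10)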
| 35.635644 | 219 | 0.622083 |
1f0f9144cd9b1772e635bc457f7aa93f6e1dd456 | 2,443 | py | Python | train.py | qingquansong/CSCE689 | 8f23ffed5dff3652499b8bba9075fd1c109f7056 | ["MIT"] | null | null | null | train.py | qingquansong/CSCE689 | 8f23ffed5dff3652499b8bba9075fd1c109f7056 | ["MIT"] | null | null | null | train.py | qingquansong/CSCE689 | 8f23ffed5dff3652499b8bba9075fd1c109f7056 | ["MIT"] | null | null | null |
import torch
from torch.autograd import Variable
import time
import os
import sys
from utils import AverageMeter, calculate_accuracy
def train_epoch(epoch, data_loader, model, criterion, optimizer, opt,
epoch_logger, batch_logger):
print('train at epoch {}'.format(epoch))
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
accuracies = AverageMeter()
end_time = time.time()
for i, (inputs, targets) in enumerate(data_loader):
data_time.update(time.time() - end_time)
if not opt.no_cuda:
targets = targets.cuda(non_blocking=True)  # 'async' became a reserved word in Python 3.7
inputs = Variable(inputs)
targets = Variable(targets)
outputs = model(inputs)
loss = criterion(outputs, targets)
acc = calculate_accuracy(outputs, targets)
losses.update(loss.data, inputs.size(0))
accuracies.update(acc, inputs.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end_time)
end_time = time.time()
batch_logger.log({
'epoch': epoch,
'batch': i + 1,
'iter': (epoch - 1) * len(data_loader) + (i + 1),
'loss': losses.val,
'acc': accuracies.val,
'lr': optimizer.param_groups[0]['lr']
})
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch,
i + 1,
len(data_loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
acc=accuracies))
epoch_logger.log({
'epoch': epoch,
'loss': losses.avg,
'acc': accuracies.avg,
'lr': optimizer.param_groups[0]['lr']
})
if epoch % opt.checkpoint == 0:
save_file_path = os.path.join(opt.result_path,
'save_{}.pth'.format(epoch))
states = {
'epoch': epoch + 1,
'arch': opt.arch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(states, save_file_path)
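# Editorial usage sketch (all objects hypothetical); the function is called
# once per epoch from the outer training loop, e.g.:
#     for epoch in range(1, opt.n_epochs + 1):
#         train_epoch(epoch, train_loader, model, criterion, optimizer, opt,
#                     train_logger, train_batch_logger)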
| 29.792683 | 69 | 0.536226 |
430e338db9a22ccfcb1e9ef756c92e8664988e2b | 131 | py | Python | zpipe/utils/__init__.py | jheo4/zpipe | 8cc3ac38b0f999ae4c1a29e63818457c4b7d6cf6 | ["MIT"] | 1 | 2020-06-28T02:31:08.000Z | 2020-06-28T02:31:08.000Z | zpipe/utils/__init__.py | jheo4/zpipe | 8cc3ac38b0f999ae4c1a29e63818457c4b7d6cf6 | ["MIT"] | 6 | 2020-06-28T01:52:45.000Z | 2020-07-10T03:55:41.000Z | zpipe/utils/__init__.py | jheo4/zpipe | 8cc3ac38b0f999ae4c1a29e63818457c4b7d6cf6 | ["MIT"] | null | null | null |
from .pickles import send_zero_copy
from .pickles import recv_zero_copy
from .pickles import pickle_keypoints
from .ztypes import *
| 32.75 | 37 | 0.847328 |
2dfcb53b595e67627c600df6ce26b0fc296bc9e7 | 413 | py | Python | proyecto_encuestas/wsgi.py | JCYanes/ProyectoEncuestas | a19ac8df5607f322bfce98c7edf5387987021a8a | ["MIT"] | null | null | null | proyecto_encuestas/wsgi.py | JCYanes/ProyectoEncuestas | a19ac8df5607f322bfce98c7edf5387987021a8a | ["MIT"] | null | null | null | proyecto_encuestas/wsgi.py | JCYanes/ProyectoEncuestas | a19ac8df5607f322bfce98c7edf5387987021a8a | ["MIT"] | null | null | null |
"""
WSGI config for proyecto_encuestas project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proyecto_encuestas.settings')
application = get_wsgi_application()
| 24.294118 | 78 | 0.79661 |
fc0f9da2eef29657ba68dfa3929ecd42789e1da6 | 13,112 | py | Python | Astar/Astar_algorithm.py | ShervinAmbrose/Path-Finding-Algorithms-Visualised | ee93cfe98d2b9db13ba3cedaec6828b32d5de9f3 | ["MIT"] | null | null | null | Astar/Astar_algorithm.py | ShervinAmbrose/Path-Finding-Algorithms-Visualised | ee93cfe98d2b9db13ba3cedaec6828b32d5de9f3 | ["MIT"] | null | null | null | Astar/Astar_algorithm.py | ShervinAmbrose/Path-Finding-Algorithms-Visualised | ee93cfe98d2b9db13ba3cedaec6828b32d5de9f3 | ["MIT"] | 1 | 2021-10-02T03:36:30.000Z | 2021-10-02T03:36:30.000Z |
import pygame
import sys
import math
import random
'''
Change this to change the screen size and number of boxes
recommended is WIN = 800, ROWS = 50
you can aslo change it to WIN = 400, ROWS = 25
'''
WIN = 800
ROWS = 50
CAPTION = 'A* path finding algorithm'
SCREEN = pygame.display.set_mode((WIN, WIN))
'''Number of parentCell neighbours, Choose 4 or 8 only.
Each cell can either have 4 neighbours (Right, Bottom, Left, Top)
Or 8 neighbours (Right, Bottom-Right, Bottom, Bottom-Left, Left, Top-Left, Top, Top-Right)
Recommended is 8
'''
neigh = 4
GREEN = (0, 255, 0)
PURPLE = (255, 0, 255)
YELLOW = (255, 255, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREY = (128, 128, 128)
TURQUOISE = (64, 224, 208)
'''
Main Class for entire code
'''
class App(object):
def __init__(self):
self.done = False
self.grid = []
self.gridSize = WIN // ROWS
self.startDone = False
self.endDone = True
self.barrierDone = True
self.barriers = False
self.startCoord = None
self.endCoord = None
self.barrierPos = []
self.pathDict = {}
self.pathFound = True
self.removeBarrier = []
'''
Stores the (x, y) co-ordinates
'''
def makeGrid(self):
for i in range(ROWS):
self.grid.append([])
for j in range(ROWS):
self.grid[i].append((j * self.gridSize, i * self.gridSize))
self.drawGrid()
'''
Draws vertical and horizontal line on the (x, y) co-ordinates
'''
def drawGrid(self):
for i in range(ROWS):
pygame.draw.line(
SCREEN, BLACK, (0, self.grid[i][i][0]), (WIN, self.grid[i][i][0]))
for j in range(ROWS):
pygame.draw.line(
SCREEN, BLACK, (self.grid[i][j][1], 0), (self.grid[i][j][1], WIN))
pygame.display.update()
'''
One draw Function to draw the start, end, barriers, erase-bariers and the output path.
'''
def draw(self, coord, colour):
x, y = coord
pygame.draw.rect(
SCREEN, colour, (self.grid[x][y][1], self.grid[x][y][0], self.gridSize, self.gridSize))
pygame.display.update()
self.drawGrid()
'''
Different H and G scores to play with
'''
def h(self, start, end):
x1, y1 = start
x2, y2 = end
# return abs(y1 - y2) + abs(x1 - x2)
return min(abs(x1 - x2), abs(y1 - y2)) * 14 + abs(abs(x1 - x2) - abs(y1 - y2)) * 10
def g(self, start, currentCell):
x1, y1 = start
x2, y2 = currentCell
# return abs(y1 - y2) + abs(x1 - x2)
return min(abs(x1 - x2), abs(y1 - y2)) * 14 + abs(abs(x1 - x2) - abs(y1 - y2)) * 10
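# Editorial worked example of the weighted octile distance above: from (0, 0)
# to (3, 5) there are min(3, 5) = 3 diagonal steps costing 14 each and
# abs(3 - 5) = 2 straight steps costing 10 each, i.e. 3 * 14 + 2 * 10 = 62.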
'''Neighbours of parent cell, Column x Row'''
def algoNeighbours(self, cell):
k = []
if neigh == 8:
n = [(cell[0] + 1, cell[1]), (cell[0] + 1, cell[1] + 1), (cell[0], cell[1] + 1), (cell[0] - 1, cell[1] + 1),
(cell[0] - 1, cell[1]), (cell[0] - 1, cell[1] - 1), (cell[0], cell[1] - 1), (cell[0] + 1, cell[1] - 1)]
elif neigh == 4:
n = [(cell[0] + 1, cell[1]), (cell[0], cell[1] + 1),
(cell[0] - 1, cell[1]), (cell[0], cell[1] - 1)]
for i in range(neigh):
if n[i][0] >= 0 and n[i][1] >= 0 and n[i][0] < ROWS and n[i][1] < ROWS and n[i] not in self.barrierPos and n[i] != self.startCoord and n[i] not in self.pathDict:
k.append(n[i])
return k
def reconstructNeighbours(self, cell, cellDict):
k = []
if neigh == 8:
n = [(cell[0] + 1, cell[1]), (cell[0] + 1, cell[1] + 1), (cell[0], cell[1] + 1), (cell[0] - 1, cell[1] + 1),
(cell[0] - 1, cell[1]), (cell[0] - 1, cell[1] - 1), (cell[0], cell[1] - 1), (cell[0] + 1, cell[1] - 1)]
elif neigh == 4:
n = [(cell[0] + 1, cell[1]), (cell[0], cell[1] + 1),
(cell[0] - 1, cell[1]), (cell[0], cell[1] - 1)]
for i in range(neigh):
if n[i][0] >= 0 and n[i][1] >= 0 and n[i][0] < ROWS and n[i][1] < ROWS and n[i] not in self.barrierPos and n[i] in self.pathDict and n[i] not in cellDict:
k.append(n[i])
if n[i] == self.endCoord:
k.append(n[i])
return k
def reconstructPath(self, cell):
k = []
if neigh == 8:
n = [(cell[0] + 1, cell[1]), (cell[0] + 1, cell[1] + 1), (cell[0], cell[1] + 1), (cell[0] - 1, cell[1] + 1),
(cell[0] - 1, cell[1]), (cell[0] - 1, cell[1] - 1), (cell[0], cell[1] - 1), (cell[0] + 1, cell[1] - 1)]
elif neigh == 4:
n = [(cell[0] + 1, cell[1]), (cell[0], cell[1] + 1),
(cell[0] - 1, cell[1]), (cell[0], cell[1] - 1)]
for i in range(neigh):
if n[i][0] >= 0 and n[i][1] >= 0 and n[i][0] < ROWS and n[i][1] < ROWS and n[i] not in self.barrierPos and n[i] in self.pathDict:
k.append(n[i])
return k
def algoCalcultions(self, lowestF, costDict):
if len(lowestF) == 1:
return lowestF[0]
else:
tempHcost = min(value[1]
for key, value in costDict.items() if key in lowestF)
for key in lowestF:
data = costDict[key]
if data[1] == tempHcost:
return key
else:
continue
'''
Basic working of the algorithm: it takes the starting cell as parentCell and determines its
neighbours (children); the G, H and F cost of each neighbour is calculated and the neighbour
with the lowest F cost becomes the new parent cell. This process is repeated until the parentCell
is equal to the endCoordinate.
'''
def algorithm(self):
parentCell = self.startCoord
costDict = {}
while parentCell != self.endCoord:
children = self.algoNeighbours(parentCell)
for i in range(len(children)):
gCost = self.g(self.startCoord, children[i])
hCost = self.h(children[i], self.endCoord)
fCost = gCost + hCost
if children[i] != self.endCoord:
self.draw(children[i], YELLOW)
costDict[children[i]] = [gCost, hCost, fCost]
if costDict:
tempFcost = min(data[2] for data in costDict.values())
lowestF = [key for key, value in costDict.items()
if value[2] == tempFcost]
parentCell = self.algoCalcultions(lowestF, costDict)
if parentCell == self.endCoord:
break
self.pathDict[parentCell] = costDict.pop(parentCell)
else:
self.noSolution()
if parentCell != self.endCoord:
self.draw(parentCell, WHITE)
'''
Once the cells from start to end are explored,
each explored cell is assigned a number
'''
number = 1
parentCell = self.startCoord
cellDict = {}
children = self.reconstructNeighbours(parentCell, cellDict)
for child in children:
cellDict[child] = number
number += 1
cell = 1
while parentCell != self.endCoord:
parentCell = [key for key, value in cellDict.items()
if value == cell]
children = self.reconstructNeighbours(parentCell[0], cellDict)
if len(children) > 0 and children[0] == self.endCoord:
break
for child in children:
cellDict[child] = number
# self.draw(child, PURPLE)
number += 1
cell += 1
'''
After assigning numbers to each cell,
the path from end to start is determined by
choosing the endCell as parentCell, then determining
its neighbours and choosing the neighbour with lowest
number as the new parent cell
'''
donePath = False
startCoordNeigh = self.reconstructPath(self.startCoord)
reversedCellDict = {}
keysList = cellDict.keys()
reversedKeysList = list(reversed(keysList))
for key in reversedKeysList:
reversedCellDict[key] = cellDict.pop(key)
endCoordNeigh = self.reconstructPath(self.endCoord)
for key in reversedKeysList:
if key in endCoordNeigh:
parentCell = key
break
while not donePath:
self.draw(parentCell, PURPLE)
children = []
children = self.reconstructPath(parentCell)
if len(children) == 1:
del reversedCellDict[parentCell]
del self.pathDict[parentCell]
parentCell = children[0]
else:
del reversedCellDict[parentCell]
del self.pathDict[parentCell]
temp = min(
value for key, value in reversedCellDict.items() if key in children)
tempCell = [key for key,
value in reversedCellDict.items() if value == temp]
parentCell = tempCell[0]
if parentCell in startCoordNeigh:
self.draw(parentCell, PURPLE)
donePath = True
'''
Takes the mouse (x, y) position and converts it to
the grid's (x, y) coordinate
'''
def coordCalculation(self, pos):
x, y = pos
x = x // self.gridSize
y = y // self.gridSize
return (x, y)
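# Editorial worked example: with WIN = 800 and ROWS = 50 each cell is 16 px
# wide, so a click at pixel (403, 125) maps to cell (403 // 16, 125 // 16),
# i.e. (25, 7).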
'''
If no solution is found press escape or the cross mark
'''
def noSolution(self):
running = True
keys = pygame.key.get_pressed()
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT or keys[pygame.K_ESCAPE]:
running = False
pygame.quit()
sys.exit()
'''
An event loop for calling start, end and barrier.
'''
def callEvent(self):
keys = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT or keys[pygame.K_ESCAPE]:
self.done = True
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
if not self.startDone:
self.startCoord = self.coordCalculation(event.pos)
self.draw(self.startCoord, GREEN)
self.startDone = True
self.endDone = False
elif not self.endDone:
self.endCoord = self.coordCalculation(event.pos)
self.draw(self.endCoord, TURQUOISE)
self.endDone = True
self.pathFound = False
self.barrierDone = False
elif not self.barrierDone:
self.barriers = True
if pygame.mouse.get_pressed()[2] == 1:
(x, y) = self.coordCalculation(pygame.mouse.get_pos())
if (x, y) != self.startCoord and (x, y) != self.endCoord:
self.removeBarrier.append((x, y))
self.draw((x, y), GREY)
self.barrierPos = list(
set(self.barrierPos) - set(self.removeBarrier))
self.removeBarrier = []
if not keys[pygame.K_SPACE]:
if self.barriers and pygame.mouse.get_pressed()[0] == 1:
(x, y) = self.coordCalculation(pygame.mouse.get_pos())
self.barrierPos.append((x, y))
self.draw((x, y), BLACK)
self.barrierPos = list(
set(self.barrierPos) - set(self.removeBarrier))
self.removeBarrier = []
else:
self.barriers = False
self.barrierDone = True
if not self.pathFound:
self.algorithm()
self.pathFound = True
'''
Press 'c' to clear the display and start over again
'''
if keys[pygame.K_c]:
SCREEN.fill(GREY)
self.makeGrid()
self.barrierDone = True
self.barriers = False
self.startDone = False
self.endDone = True
self.barrierPos = []
self.pathFound = False
self.pathDict = {}
self.removeBarrier = []
if keys[pygame.K_r]:
for i in range((ROWS * ROWS) // 6):
x = random.randint(0, ROWS - 1)
y = random.randint(0, ROWS - 1)
if (x, y) != self.startCoord and (x, y) != self.endCoord:
self.barrierPos.append((x, y))
self.draw((x, y), BLACK)
def mainLoop(self):
SCREEN.fill(GREY)
self.makeGrid()
while not self.done:
self.callEvent()
def main():
pygame.init()
pygame.display.set_caption(CAPTION)
App().mainLoop()
pygame.quit()
sys.exit()
if __name__ == "__main__":
main()
| 36.121212 | 173 | 0.515024 |
9365366b67c4d4be66f707509648d03770ee12ee | 17,614 | py | Python | src/olympia/scanners/tasks.py | dnutiu/addons-server | f42e8d44fc21bf9d40968b1732f62d8465a8389e | ["BSD-3-Clause"] | 1 | 2020-11-30T18:58:30.000Z | 2020-11-30T18:58:30.000Z | src/olympia/scanners/tasks.py | dnutiu/addons-server | f42e8d44fc21bf9d40968b1732f62d8465a8389e | ["BSD-3-Clause"] | null | null | null | src/olympia/scanners/tasks.py | dnutiu/addons-server | f42e8d44fc21bf9d40968b1732f62d8465a8389e | ["BSD-3-Clause"] | null | null | null |
import os
import uuid
from django.conf import settings
import requests
import waffle
import yara
from django_statsd.clients import statsd
import olympia.core.logger
from olympia import amo
from olympia.amo.celery import create_chunked_tasks_signatures, task
from olympia.amo.decorators import use_primary_db
from olympia.constants.scanners import (
ABORTED,
ABORTING,
COMPLETED,
CUSTOMS,
MAD,
RUNNING,
SCANNERS,
WAT,
YARA,
)
from olympia.devhub.tasks import validation_task
from olympia.files.models import FileUpload
from olympia.files.utils import SafeZip
from olympia.versions.models import Version
from .models import (
ImproperScannerQueryRuleStateError, ScannerQueryResult, ScannerQueryRule,
ScannerResult, ScannerRule)
log = olympia.core.logger.getLogger('z.scanners.task')
def run_scanner(results, upload_pk, scanner, api_url, api_key):
"""
Run a scanner on a FileUpload via RPC and store the results.
- `results` are the validation results passed in the validation chain. This
task is a validation task, which is why it must receive the validation
results as first argument.
- `upload_pk` is the FileUpload ID.
"""
scanner_name = SCANNERS.get(scanner)
log.info('Starting scanner "%s" task for FileUpload %s.', scanner_name,
upload_pk)
if not results['metadata']['is_webextension']:
log.info('Not running scanner "%s" for FileUpload %s, it is not a '
'webextension.', scanner_name, upload_pk)
return results
upload = FileUpload.objects.get(pk=upload_pk)
try:
if not os.path.exists(upload.path):
raise ValueError('File "{}" does not exist.'.format(upload.path))
scanner_result = ScannerResult(upload=upload, scanner=scanner)
with statsd.timer('devhub.{}'.format(scanner_name)):
_run_scanner_for_url(
scanner_result, upload.get_authenticated_download_url(),
scanner, api_url, api_key)
scanner_result.save()
if scanner_result.has_matches:
statsd.incr('devhub.{}.has_matches'.format(scanner_name))
for scanner_rule in scanner_result.matched_rules.all():
statsd.incr(
'devhub.{}.rule.{}.match'.format(
scanner_name, scanner_rule.id
)
)
statsd.incr('devhub.{}.success'.format(scanner_name))
log.info('Ending scanner "%s" task for FileUpload %s.', scanner_name,
upload_pk)
except Exception:
statsd.incr('devhub.{}.failure'.format(scanner_name))
# We log the exception but we do not raise to avoid perturbing the
# submission flow.
log.exception('Error in scanner "%s" task for FileUpload %s.',
scanner_name, upload_pk)
return results
def _run_scanner_for_url(scanner_result, url, scanner, api_url, api_key):
"""
Inner function to run a scanner on a particular URL via RPC and add results
to the given scanner_result. The caller is responsible for saving the
scanner_result to the database.
"""
json_payload = {
'api_key': api_key,
'download_url': url,
}
response = requests.post(url=api_url,
json=json_payload,
timeout=settings.SCANNER_TIMEOUT)
try:
data = response.json()
except ValueError:
# Log the response body when JSON decoding has failed.
raise ValueError(response.text)
if response.status_code != 200 or 'error' in data:
raise ValueError(data)
scanner_result.results = data
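# Illustrative sketch of the exchange above (hypothetical values): the POST body is
#   {"api_key": "<key>", "download_url": "https://example.test/uploads/<uuid>/addon.xpi"}
# and the decoded JSON response is stored verbatim in scanner_result.results; a non-200
# status, an undecodable body, or a body containing an "error" key raises ValueError.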
@validation_task
def run_customs(results, upload_pk):
"""
Run the customs scanner on a FileUpload and store the results.
This task is intended to be run as part of the submission process only.
When a version is created from a FileUpload, the files are removed. In
addition, we usually delete old FileUpload entries after 180 days.
- `results` are the validation results passed in the validation chain. This
task is a validation task, which is why it must receive the validation
results as first argument.
- `upload_pk` is the FileUpload ID.
"""
return run_scanner(
results,
upload_pk,
scanner=CUSTOMS,
api_url=settings.CUSTOMS_API_URL,
api_key=settings.CUSTOMS_API_KEY,
)
@validation_task
def run_wat(results, upload_pk):
"""
Run the wat scanner on a FileUpload and store the results.
This task is intended to be run as part of the submission process only.
When a version is created from a FileUpload, the files are removed. In
addition, we usually delete old FileUpload entries after 180 days.
- `results` are the validation results passed in the validation chain. This
task is a validation task, which is why it must receive the validation
results as first argument.
- `upload_pk` is the FileUpload ID.
"""
return run_scanner(
results,
upload_pk,
scanner=WAT,
api_url=settings.WAT_API_URL,
api_key=settings.WAT_API_KEY,
)
@validation_task
def run_yara(results, upload_pk):
"""
Apply a set of Yara rules on a FileUpload and store the Yara results
(matches).
This task is intended to be run as part of the submission process only.
When a version is created from a FileUpload, the files are removed. In
addition, we usually delete old FileUpload entries after 180 days.
- `results` are the validation results passed in the validation chain. This
task is a validation task, which is why it must receive the validation
results as first argument.
- `upload_pk` is the FileUpload ID.
"""
log.info('Starting yara task for FileUpload %s.', upload_pk)
if not results['metadata']['is_webextension']:
log.info('Not running yara for FileUpload %s, it is not a '
'webextension.', upload_pk)
return results
try:
upload = FileUpload.objects.get(pk=upload_pk)
scanner_result = ScannerResult(upload=upload, scanner=YARA)
_run_yara_for_path(scanner_result, upload.path)
scanner_result.save()
if scanner_result.has_matches:
statsd.incr('devhub.yara.has_matches')
for scanner_rule in scanner_result.matched_rules.all():
statsd.incr(
'devhub.yara.rule.{}.match'.format(scanner_rule.id)
)
statsd.incr('devhub.yara.success')
log.info('Ending scanner "yara" task for FileUpload %s.', upload_pk)
except Exception:
statsd.incr('devhub.yara.failure')
# We log the exception but we do not raise to avoid perturbing the
# submission flow.
log.exception('Error in scanner "yara" task for FileUpload %s.',
upload_pk, exc_info=True)
return results
def _run_yara_for_path(scanner_result, path, definition=None):
"""
Inner function to run yara on a particular path and add results to the
given scanner_result. The caller is responsible for saving the
scanner_result to the database.
Takes an optional definition to run a single arbitrary yara rule, otherwise
uses all active yara ScannerRules.
"""
with statsd.timer('devhub.yara'):
if definition is None:
# Retrieve then concatenate all the active/valid Yara rules.
definition = '\n'.join(
ScannerRule.objects.filter(
scanner=YARA, is_active=True, definition__isnull=False
).values_list('definition', flat=True)
)
# Initialize external variables so that compilation works, we'll
# override them later when matching.
externals = ScannerRule.get_yara_externals()
rules = yara.compile(source=definition, externals=externals)
zip_file = SafeZip(source=path)
for zip_info in zip_file.info_list:
if not zip_info.is_dir():
file_content = zip_file.read(zip_info).decode(
errors='ignore'
)
filename = zip_info.filename
# Fill externals variable for this file.
externals['is_json_file'] = filename.endswith('.json')
externals['is_manifest_file'] = filename == 'manifest.json'
externals['is_locale_file'] = (
filename.startswith('_locales/') and
filename.endswith('/messages.json')
)
for match in rules.match(
data=file_content, externals=externals):
# Also add the filename to the meta dict in results.
meta = {**match.meta, 'filename': filename}
scanner_result.add_yara_result(
rule=match.rule,
tags=match.tags,
meta=meta
)
zip_file.close()
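# Illustrative sketch (hypothetical rule, not part of any shipped rule set): a yara
# definition passed to _run_yara_for_path() may reference the externals set above, e.g.
#   rule flags_manifest { condition: is_manifest_file }
# which would only match while the manifest.json entry is being scanned.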
@task
@use_primary_db
def mark_yara_query_rule_as_completed_or_aborted(query_rule_pk):
"""
Mark a ScannerQueryRule as completed/aborted.
"""
rule = ScannerQueryRule.objects.get(pk=query_rule_pk)
try:
if rule.state == RUNNING:
log.info('Marking Yara Query Rule %s as completed', rule.pk)
rule.change_state_to(COMPLETED)
elif rule.state == ABORTING:
log.info('Marking Yara Query Rule %s as aborted', rule.pk)
rule.change_state_to(ABORTED)
except ImproperScannerQueryRuleStateError:
log.error('Not marking rule as completed or aborted for rule %s in '
'mark_yara_query_rule_as_completed_or_aborted, its state is '
'%s', rule.pk, rule.get_state_display())
@task
def run_yara_query_rule(query_rule_pk):
"""
Run a specific ScannerQueryRule on multiple Versions.
    Needs the rule to be in the SCHEDULED state, otherwise does nothing.
"""
# We're not forcing this task to happen on primary db to let the replicas
# handle the Version query below, but we want to fetch the rule using the
# primary db in all cases.
rule = ScannerQueryRule.objects.using('default').get(pk=query_rule_pk)
try:
rule.change_state_to(RUNNING)
except ImproperScannerQueryRuleStateError:
log.error('Not proceeding with run_yara_query_rule on rule %s because '
'its state is %s', rule.pk, rule.get_state_display())
return
log.info('Fetching versions for run_yara_query_rule on rule %s', rule.pk)
# Build a huge list of all pks we're going to run the tasks on.
qs = Version.unfiltered.filter(
addon__type=amo.ADDON_EXTENSION, files__is_webextension=True,
)
if not rule.run_on_disabled_addons:
qs = qs.exclude(addon__status=amo.STATUS_DISABLED)
qs = qs.values_list('id', flat=True).order_by('pk')
# Build the workflow using a group of tasks dealing with 250 files at a
# time, chained to a task that marks the query as completed.
chunk_size = 250
chunked_tasks = create_chunked_tasks_signatures(
run_yara_query_rule_on_versions_chunk, list(qs), chunk_size,
task_args=(query_rule_pk,))
# Force the group id to be generated for those tasks, and store it in the
# result backend.
group_result = chunked_tasks.freeze()
group_result.save()
rule.update(
task_count=len(chunked_tasks),
celery_group_result_id=uuid.UUID(group_result.id)
)
workflow = (
chunked_tasks |
mark_yara_query_rule_as_completed_or_aborted.si(query_rule_pk)
)
log.info('Running workflow of %s tasks for run_yara_query_rule on rule %s',
len(chunked_tasks), rule.pk)
# Fire it up.
workflow.apply_async()
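# Worked example (hypothetical numbers): with 1000 matching version ids and the chunk
# size of 250 above, the group holds 4 chunk tasks, task_count is set to 4, and the
# completion/abort callback fires once every chunk task in the group has finished.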
@task(ignore_result=False) # We want the results to track completion rate.
@use_primary_db
def run_yara_query_rule_on_versions_chunk(version_pks, query_rule_pk):
"""
Task to run a specific ScannerQueryRule on a list of versions.
    Needs the rule to be in the RUNNING state, otherwise does nothing.
"""
log.info(
'Running Yara Query Rule %s on versions %s-%s.',
query_rule_pk, version_pks[0], version_pks[-1])
rule = ScannerQueryRule.objects.get(pk=query_rule_pk)
if rule.state != RUNNING:
log.info(
'Not doing anything for Yara Query Rule %s on versions %s-%s '
'since rule state is %s.', query_rule_pk, version_pks[0],
version_pks[-1], rule.get_state_display())
return
for version_pk in version_pks:
try:
version = Version.unfiltered.all().no_transforms().get(
pk=version_pk)
_run_yara_query_rule_on_version(version, rule)
except Exception:
log.exception(
'Error in run_yara_query_rule_on_version task for Version %s.',
version_pk)
def _run_yara_query_rule_on_version(version, rule):
"""
Run a specific ScannerQueryRule on a Version.
"""
file_ = version.all_files[0]
scanner_result = ScannerQueryResult(version=version, scanner=YARA)
try:
_run_yara_for_path(
scanner_result, file_.current_file_path,
definition=rule.definition)
except FileNotFoundError:
# Fallback in case the file was disabled/re-enabled and not yet moved,
# we try the other possible path. This shouldn't happen too often.
tried_path = file_.current_file_path
fallback_path = (
file_.file_path if tried_path == file_.guarded_file_path
else file_.guarded_file_path
)
_run_yara_for_path(
scanner_result, fallback_path, definition=rule.definition)
# Unlike ScannerResult, we only want to save ScannerQueryResult if there is
# a match, there would be too many things to save otherwise and we don't
# really care about non-matches.
if scanner_result.results:
scanner_result.save()
# FIXME: run_action ?
return scanner_result
@task
@use_primary_db
def call_mad_api(all_results, upload_pk):
"""
Call the machine learning API (mad-server) for a given FileUpload.
This task is the callback of the Celery chord in the validation chain. It
receives all the results returned by all the tasks in this chord.
- `all_results` are the results returned by all the tasks in the chord.
- `upload_pk` is the FileUpload ID.
"""
# This task is the callback of a Celery chord and receives all the results
# returned by all the tasks in this chord. The first task registered in the
# chord is `forward_linter_results()`:
results = all_results[0]
# In case of a validation (linter) error, we do want to skip this task.
# This is similar to the behavior of all other tasks decorated with
# `@validation_task` but, because this task is the callback of a Celery
# chord, we cannot use this decorator.
if results['errors'] > 0:
return results
if not waffle.switch_is_active('enable-mad'):
log.debug('Skipping scanner "mad" task, switch is off')
return results
log.info('Starting scanner "mad" task for FileUpload %s.', upload_pk)
if not results['metadata']['is_webextension']:
log.info(
'Not calling scanner "mad" for FileUpload %s, it is not '
'a webextension.',
upload_pk,
)
return results
try:
# TODO: retrieve all scanner results and pass each result to the API.
customs_results = ScannerResult.objects.get(
upload_id=upload_pk, scanner=CUSTOMS
)
scanMapKeys = customs_results.results.get('scanMap', {}).keys()
if len(scanMapKeys) < 2:
log.info(
'Not calling scanner "mad" for FileUpload %s, scanMap is too '
'small.',
upload_pk
)
statsd.incr('devhub.mad.skip')
return results
with statsd.timer('devhub.mad'):
json_payload = {'scanners': {'customs': customs_results.results}}
response = requests.post(
url=settings.MAD_API_URL,
json=json_payload,
timeout=settings.MAD_API_TIMEOUT,
)
try:
data = response.json()
except ValueError:
# Log the response body when JSON decoding has failed.
raise ValueError(response.text)
if response.status_code != 200:
raise ValueError(data)
default_score = -1
ScannerResult.objects.create(
upload_id=upload_pk,
scanner=MAD,
results=data,
score=data.get('ensemble', default_score),
)
# Update the individual scanner results scores.
customs_score = (
data.get('scanners', {})
.get('customs', {})
.get('score', default_score)
)
customs_results.update(score=customs_score)
statsd.incr('devhub.mad.success')
log.info('Ending scanner "mad" task for FileUpload %s.', upload_pk)
except Exception:
statsd.incr('devhub.mad.failure')
# We log the exception but we do not raise to avoid perturbing the
# submission flow.
log.exception(
'Error in scanner "mad" task for FileUpload %s.', upload_pk
)
return results
| 36.094262 | 79 | 0.644544 |
15ce7420a0e24295159ce14d6794de9cbeea0a07 | 959 | py | Python | MovingFile.py | 3n5/functions-python | be42f7b2108f8ed481956ce1c1ddc356c4ce17d6 | ["MIT"] | 2 | 2020-12-29T06:32:43.000Z | 2020-12-29T06:32:45.000Z | MovingFile.py | h4r3/functions-python | be42f7b2108f8ed481956ce1c1ddc356c4ce17d6 | ["MIT"] | null | null | null | MovingFile.py | h4r3/functions-python | be42f7b2108f8ed481956ce1c1ddc356c4ce17d6 | ["MIT"] | null | null | null |
"""Move the files that match the csv list to another folder."""
def file_move_based_on_csv():
import shutil
import glob
import csv
_from=r'C:/Users/USER/Desktop/_from' #Source directory #[[apple],[banana],[grape]]
_to=r'C:/Users/USER/Desktop/_to' #Destination Directory []
_csv=r'C:/Users/USER/Downloads/archive/reference.csv'#[[apple],[banana]]
with open(_csv, 'r') as csvfile: # , encoding='shift_jis'
csv_reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for file1 in csv_reader:
      # build a pattern like '<_from>/<csv value>*' from the source directory and the CSV row
      file = glob.glob('{0}/{1}*'.format(_from, ','.join(file1)))
if len(file) == 0:
pass
elif len(file) == 1:
shutil.move(','.join(file), _to)
#shutil.copy(','.join(file),_to )
else:
for file2 in file:
shutil.move(file2, _to)
#shutil.copy(i2,_to) #if copy
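# Usage sketch (hypothetical paths/rows): a CSV row "apple" yields the glob pattern
# 'C:/Users/USER/Desktop/_from/apple*'; every match is moved into _to, or copied
# instead if the commented shutil.copy() lines are used.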
| 41.695652 | 87 | 0.53806 |
ac411e7fb7644e0ef64a2f57c840fed485752ba4 | 19,658 | py | Python | yauber_algo/_algo/tests/test_featurewiz_apply.py | LongNguyen1012/yauber-algo | 40f7d805408a788e7f363ec02d3315ff379cc069 | ["MIT"] | 1 | 2019-02-09T15:34:34.000Z | 2019-02-09T15:34:34.000Z | yauber_algo/_algo/tests/test_featurewiz_apply.py | LongNguyen1012/yauber-algo | 40f7d805408a788e7f363ec02d3315ff379cc069 | ["MIT"] | null | null | null | yauber_algo/_algo/tests/test_featurewiz_apply.py | LongNguyen1012/yauber-algo | 40f7d805408a788e7f363ec02d3315ff379cc069 | ["MIT"] | 2 | 2018-10-10T16:01:59.000Z | 2018-11-27T16:50:56.000Z |
import unittest
from yauber_algo.errors import *
class ApplyTestCase(unittest.TestCase):
    def test_apply(self):
import yauber_algo.sanitychecks as sc
from numpy import array, nan, inf
import os
import sys
import pandas as pd
import numpy as np
from yauber_algo.algo import apply
#
# Function settings
#
algo = 'apply'
func = apply
with sc.SanityChecker(algo) as s:
#
# Check regular algorithm logic
#
s.check_regular(
array([nan, nan, 6, 7, 8, 9, 6]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
3,
np.sum,
None, # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='rolling'
)
s.check_regular(
array([nan, nan, 6, nan, nan, 9, 6]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
3,
np.sum,
array([0, 0, 0, 1, 1, 1, 1]), # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='category'
)
s.check_regular(
array([nan, nan, nan, nan, nan, nan, nan]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
3,
np.sum,
array([0, 0, 0, 1, 1, 1, 1]), # category=
3, # return_as_cat=
True, # exclude_nan=
),
suffix='category_ret_as_cat_number_not_exists'
)
s.check_regular(
None,
func,
(
np.arange(0, 101),
3,
np.sum,
np.arange(0, 101),
3, # return_as_cat=
True, # exclude_nan=
),
suffix='category_more_than_100_unique_cats',
exception=YaUberAlgoArgumentError
)
s.check_regular(
array([nan, nan, 6, 6, 6, 6, 6]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
3,
np.sum,
array([0, 0, 0, 1, 1, 1, 1]), # category=
0, # return_as_cat=
True, # exclude_nan=
),
suffix='category_exact'
)
s.check_regular(
array([nan, nan, 6, 6, 6, 6, 6]),
func,
(
array([3, 2, 1, nan, nan, nan, nan]),
3,
np.sum,
array([0, 0, 0, 1, 1, 1, 1]), # category=
0, # return_as_cat=
True, # exclude_nan=
),
suffix='category_ret_nan'
)
s.check_regular(
array([nan, nan, nan, nan, nan, nan, nan]),
func,
(
array([3, 2, 1, 4, 1, nan, nan]),
3,
np.sum,
array([0, 0, 0, 1, 1, 1, 1]), # category=
array([1, 1, 1, 1, 1, 1, 1]), # return_as_cat=
True, # exclude_nan=
),
suffix='category_ret_nan_if_arr_nan'
)
s.check_regular(
array([nan, nan, 6, 7, 8, 9, 6]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
0,
np.sum,
None, # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='zero_period',
exception=YaUberAlgoArgumentError,
)
s.check_regular(
array([nan, nan, 6, 7, 8, 9, 6]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
-1,
np.sum,
None, # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='neg_period',
exception=YaUberAlgoArgumentError,
)
s.check_regular(
func(array([3, 2, 1, 4, 3, 2, 1]), 3, np.sum),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
3,
np.sum,
array([1, 1, 1, 1, 1, 1, 1]), # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='rolling_and_categorical_equal'
)
s.check_regular(
array([nan, nan, nan, nan, 8, 9, 6]),
func,
(
array([3, nan, 1, 4, 3, 2, 1]),
3,
np.sum,
None, # category=
None, # return_as_cat=
False, # exclude_nan=
),
suffix='rolling_not_exclude_nan'
)
#
# NAN / INF
#
#
s.check_naninf(
array([nan, nan, nan, nan, 8, 9, nan]),
func,
(
array([inf, nan, 1, 4, 3, 2, inf]),
3,
np.sum,
None, # category=
None, # return_as_cat=
False, # exclude_nan=
),
suffix='rolling'
)
s.check_naninf(
array([nan, nan, 1, 5, 8, 9, nan]),
func,
(
array([inf, nan, 1, 4, 3, 2, inf]),
3,
np.sum,
None, # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='rolling_naninf_excluded'
)
s.check_series(
pd.Series(array([nan, nan, 6, 7, 8, 9, 6])),
func,
(
pd.Series(array([3, 2, 1, 4, 3, 2, 1])),
3,
np.sum,
None, # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='rolling'
)
s.check_series(
pd.Series(array([nan, nan, 6, 7, 8, 9, 6])),
func,
(
pd.Series(array([3, 2, 1, 4, 3, 2, 1])),
3,
np.sum,
pd.Series(array([0, 0, 0, 0, 0, 0, 0])), # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='categorical'
)
s.check_series(
pd.Series(array([nan, nan, 6, 7, 8, 9, 6])),
func,
(
pd.Series(array([3, 2, 1, 4, 3, 2, 1])),
3,
np.sum,
pd.Series(array([0, 0, 0, 0, 0, 0, 0])), # category=
pd.Series(array([0, 0, 0, 0, 0, 0, 0])), # return_as_cat=
True, # exclude_nan=
),
suffix='categorical_ret_as'
)
s.check_regular(
array([nan, nan, 6, 7, nan, nan, 6]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
3,
np.sum,
array([0, 0, 0, 0, 1, 1, 1]), # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='categorical'
)
s.check_naninf(
array([nan, nan, 6, nan, nan, nan, nan]),
func,
(
array([3, 2, 1, nan, 3, 2, inf]),
3,
np.sum,
array([0, 0, 0, 0, 1, 1, 1]), # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='categorical'
)
s.check_naninf(
array([nan, nan, 6, nan, nan, nan, nan]),
func,
(
array([3, 2, 1, 2, 3, 2, 4]),
3,
np.sum,
array([0, 0, 0, inf, 1, 1, nan]), # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='nan_for_category'
)
s.check_naninf(
array([nan, nan, 6, 6, 6, 6, 6]),
func,
(
array([3, 2, 1, 2, 3, 2, 4]),
3,
np.sum,
array([0, 0, 0, inf, 1, 1, nan]), # category=
0, # return_as_cat=
True, # exclude_nan=
),
suffix='return_as_cat_ignore_codex',
ignore_nan_argument_position_check=True,
)
s.check_naninf(
array([nan, nan, nan, nan, nan, 6, 6]),
func,
(
array([3, 2, 1, 2, 3, 2, nan]),
3,
np.sum,
array([0, 0, 1, inf, 1, 1, nan]), # category=
1, # return_as_cat=
True, # exclude_nan=
),
suffix='return_as_cat_non_NAN_if_reference_with_valid_window',
ignore_nan_argument_position_check=True,
)
s.check_naninf(
array([nan, nan, nan, nan, nan, 6, nan]),
func,
(
array([3, 2, 1, 2, 3, 2, nan]),
3,
np.sum,
array([0, 0, 1, inf, 1, 1, 1]), # category=
1, # return_as_cat=
True, # exclude_nan=
),
suffix='return_as_cat_NOT_ignore_codex_if_same_cat',
)
s.check_naninf(
array([nan, nan, nan, nan, nan, nan, nan]),
func,
(
array([3, 2, 1, 2, 3, 2, nan]),
3,
np.sum,
array([0, 0, 1, inf, 1, 1, nan]), # category=
0, # return_as_cat=
True, # exclude_nan=
),
suffix='return_as_cat_widows_less_period',
)
s.check_dtype_float(
array([nan, nan, 6, 7, 8, 9, 6], dtype=np.float),
func,
(
array([3, 2, 1, 4, 3, 2, 1], dtype=np.float),
3,
np.sum,
None, # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='rolling'
)
s.check_dtype_float(
array([nan, nan, 6, 5, nan, nan, 9], dtype=np.float),
func,
(
array([3, 2, 1, 2, 3, 2, 4], dtype=np.float),
3,
np.sum,
array([0, 0, 0, 0, 1, 1, 1], dtype=np.float), # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='category'
)
s.check_dtype_float(
array([nan, nan, 6, 5, 5, 5, 5], dtype=np.float),
func,
(
array([3, 2, 1, 2, 3, 2, 4], dtype=np.float),
3,
np.sum,
array([0, 0, 0, 0, 1, 1, 1], dtype=np.float), # category=
array([0, 0, 0, 0, 0, 0, 0], dtype=np.float),
True, # exclude_nan=
),
suffix='category_ret_as'
)
s.check_dtype_bool(
array([nan, nan, 3, 3, 3, 3, 3], dtype=np.float),
func,
(
array([1, 1, 1, 1, 1, 1, 1], dtype=np.bool),
3,
np.sum,
None, # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='rolling'
)
s.check_dtype_bool(
array([nan, nan, 3, 3, nan, nan, 3], dtype=np.float),
func,
(
array([1, 1, 1, 1, 1, 1, 1], dtype=np.bool),
3,
np.sum,
array([0, 0, 0, 0, 1, 1, 1], dtype=np.bool), # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='category'
)
s.check_dtype_bool(
array([nan, nan, 6, 5, 5, 5, 5], dtype=np.float),
func,
(
array([3, 2, 1, 2, 3, 2, 4], dtype=np.float),
3,
np.sum,
array([0, 0, 0, 0, 1, 1, 1], dtype=np.bool), # category=
array([0, 0, 0, 0, 0, 0, 0], dtype=np.bool),
True, # exclude_nan=
),
suffix='category_ret_as'
)
s.check_dtype_int(
array([nan, nan, 6, 7, 8, 9, 6], dtype=np.float),
func,
(
array([3, 2, 1, 4, 3, 2, 1], dtype=np.int32),
3,
np.sum,
None, # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='rolling'
)
s.check_dtype_int(
array([nan, nan, 6, 5, nan, nan, 9], dtype=np.float),
func,
(
array([3, 2, 1, 2, 3, 2, 4], dtype=np.int32),
3,
np.sum,
array([0, 0, 0, 0, 1, 1, 1], dtype=np.int32), # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='category'
)
s.check_dtype_int(
array([nan, nan, 6, 5, 5, 5, 5], dtype=np.float),
func,
(
array([3, 2, 1, 2, 3, 2, 4], dtype=np.float),
3,
np.sum,
array([0, 0, 0, 0, 1, 1, 1], dtype=np.int32), # category=
array([0, 0, 0, 0, 0, 0, 0], dtype=np.int32),
True, # exclude_nan=
),
suffix='category_ret_as'
)
s.check_dtype_object(
func,
(
array([3, 2, 1, 4, 3, 2, 1], dtype=np.object),
3,
np.sum,
None, # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='rolling'
)
s.check_dtype_object(
func,
(
array([3, 2, 1, 2, 3, 2, 4], dtype=np.object),
3,
np.sum,
array([0, 0, 0, 0, 1, 1, 1], dtype=np.object), # category=
None, # return_as_cat=
True, # exclude_nan=
),
suffix='category'
)
s.check_dtype_object(
func,
(
array([3, 2, 1, 2, 3, 2, 4], dtype=np.float),
3,
np.sum,
array([0, 0, 0, 0, 1, 1, 1], dtype=np.float), # category=
array([0, 0, 0, 0, 0, 0, 0], dtype=np.object),
True, # exclude_nan=
),
suffix='category_ret_as'
)
s.check_futref(5, 1,
func,
(
np.random.random(100),
5,
np.sum,
),
suffix='rolling'
)
s.check_window_consistency(5, 1,
func,
(
np.random.random(100),
5,
np.sum,
),
suffix='rolling'
)
s.check_futref(5, 1,
func,
(
np.random.random(100),
5,
np.sum,
np.random.random_integers(0, 3, 100),
),
suffix='category'
)
s.check_window_consistency(5, 1,
func,
(
np.random.random(100),
5,
np.sum,
np.random.random_integers(0, 3, 100),
),
suffix='category'
)
s.check_futref(5, 1,
func,
(
np.random.random(100),
5,
np.sum,
np.random.random_integers(0, 3, 100),
np.random.random_integers(0, 3, 100),
),
suffix='category_ret_as'
)
s.check_window_consistency(5, 1,
func,
(
np.random.random(100),
5,
np.sum,
np.random.random_integers(0, 3, 100),
np.random.random_integers(0, 3, 100),
),
suffix='category_ret_as'
)
| 32.983221 | 80 | 0.303744 |
c8adebe0a9b393298ed3581f18877962497bd05d | 10,336 | py | Python | sdk/python/pulumi_azure_native/apimanagement/v20191201/notification_recipient_user.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/v20191201/notification_recipient_user.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/v20191201/notification_recipient_user.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['NotificationRecipientUserArgs', 'NotificationRecipientUser']
@pulumi.input_type
class NotificationRecipientUserArgs:
def __init__(__self__, *,
notification_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
service_name: pulumi.Input[str],
user_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a NotificationRecipientUser resource.
:param pulumi.Input[str] notification_name: Notification Name Identifier.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[str] user_id: User identifier. Must be unique in the current API Management service instance.
"""
pulumi.set(__self__, "notification_name", notification_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_name", service_name)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter(name="notificationName")
def notification_name(self) -> pulumi.Input[str]:
"""
Notification Name Identifier.
"""
return pulumi.get(self, "notification_name")
@notification_name.setter
def notification_name(self, value: pulumi.Input[str]):
pulumi.set(self, "notification_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Input[str]:
"""
The name of the API Management service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[pulumi.Input[str]]:
"""
User identifier. Must be unique in the current API Management service instance.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_id", value)
class NotificationRecipientUser(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
notification_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Recipient User details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] notification_name: Notification Name Identifier.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[str] user_id: User identifier. Must be unique in the current API Management service instance.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NotificationRecipientUserArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Recipient User details.
:param str resource_name: The name of the resource.
:param NotificationRecipientUserArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NotificationRecipientUserArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
notification_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NotificationRecipientUserArgs.__new__(NotificationRecipientUserArgs)
if notification_name is None and not opts.urn:
raise TypeError("Missing required property 'notification_name'")
__props__.__dict__["notification_name"] = notification_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__.__dict__["service_name"] = service_name
__props__.__dict__["user_id"] = user_id
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:NotificationRecipientUser"), pulumi.Alias(type_="azure-native:apimanagement:NotificationRecipientUser"), pulumi.Alias(type_="azure-nextgen:apimanagement:NotificationRecipientUser"), pulumi.Alias(type_="azure-native:apimanagement/v20170301:NotificationRecipientUser"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:NotificationRecipientUser"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:NotificationRecipientUser"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:NotificationRecipientUser"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:NotificationRecipientUser"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:NotificationRecipientUser"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:NotificationRecipientUser"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:NotificationRecipientUser"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:NotificationRecipientUser"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:NotificationRecipientUser"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:NotificationRecipientUser"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:NotificationRecipientUser"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:NotificationRecipientUser"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:NotificationRecipientUser"), pulumi.Alias(type_="azure-native:apimanagement/v20210101preview:NotificationRecipientUser"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20210101preview:NotificationRecipientUser")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(NotificationRecipientUser, __self__).__init__(
'azure-native:apimanagement/v20191201:NotificationRecipientUser',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'NotificationRecipientUser':
"""
Get an existing NotificationRecipientUser resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = NotificationRecipientUserArgs.__new__(NotificationRecipientUserArgs)
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
__props__.__dict__["user_id"] = None
return NotificationRecipientUser(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userId")
def user_id(self) -> pulumi.Output[Optional[str]]:
"""
API Management UserId subscribed to notification.
"""
return pulumi.get(self, "user_id")
| 49.932367 | 1,733 | 0.685178 |
e416282f8fbefd82b41349bdfbe90a9d976edf76 | 346 | py | Python | src/aims_ui/table_utils.py | ONSdigital/address-index-ui | 57867604744f7bf6e2d596fc649bb8d3bf9baf11 | ["MIT"] | null | null | null | src/aims_ui/table_utils.py | ONSdigital/address-index-ui | 57867604744f7bf6e2d596fc649bb8d3bf9baf11 | ["MIT"] | 1 | 2022-03-11T20:46:21.000Z | 2022-03-11T20:46:21.000Z | src/aims_ui/table_utils.py | ONSdigital/address-index-ui | 57867604744f7bf6e2d596fc649bb8d3bf9baf11 | ["MIT"] | 1 | 2021-04-11T08:02:33.000Z | 2021-04-11T08:02:33.000Z |
def create_table(table_headers, table_rows):
"""Given array of table headers, and an array of arrays for each row, format as table"""
ths = [{'value': header_name} for header_name in table_headers]
trs = [{
'tds': [{
'value': col_val
} for col_val in row]
} for row in table_rows]
return {'ths': ths, 'trs': trs}
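# Example: create_table(['Name', 'Age'], [['Ada', 36]]) returns
#   {'ths': [{'value': 'Name'}, {'value': 'Age'}],
#    'trs': [{'tds': [{'value': 'Ada'}, {'value': 36}]}]}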
| 31.454545 | 90 | 0.635838 |
9bdc9b56996ef117cc846c8408d3e5dc9ccd5fa9 | 1,147 | py | Python | _zadania/2_python_controlflow/comprehension_dict_c.py | Khayn/2021-12-elearning-pythonana | a54e407adc8fb8c3a5fd2522735ae09cdef6540a | ["MIT"] | null | null | null | _zadania/2_python_controlflow/comprehension_dict_c.py | Khayn/2021-12-elearning-pythonana | a54e407adc8fb8c3a5fd2522735ae09cdef6540a | ["MIT"] | null | null | null | _zadania/2_python_controlflow/comprehension_dict_c.py | Khayn/2021-12-elearning-pythonana | a54e407adc8fb8c3a5fd2522735ae09cdef6540a | ["MIT"] | null | null | null |
"""
* Assignment: Comprehension Dict Reverse
* Required: yes
* Complexity: easy
* Lines of code: 1 lines
* Time: 3 min
English:
1. Use dict comprehension to reverse dict:
that is: change keys for values and values for keys
2. Run doctests - all must succeed
Polish:
1. Użyj rozwinięcia słownikowego do odwócenia słownika:
to jest: zamień klucze z wartościami i wartości z kluczami
2. Uruchom doctesty - wszystkie muszą się powieść
Hints:
* `dict.items()`
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> assert type(result) is dict
>>> assert all(type(x) is str for x in result.keys())
>>> assert all(type(x) is int for x in result.values())
>>> assert len(result.keys()) == 3
>>> assert 'virginica' in result.keys()
>>> assert 'setosa' in result.keys()
>>> assert 'versicolor' in result.keys()
>>> assert 0 in result.values()
>>> assert 1 in result.values()
>>> assert 2 in result.values()
>>> result
{'virginica': 0, 'setosa': 1, 'versicolor': 2}
"""
DATA = {
0: 'virginica',
1: 'setosa',
2: 'versicolor'}
# dict[str,int]:
result = ...
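# One possible solution (kept as a comment so the exercise above stays unsolved):
# result = {value: key for key, value in DATA.items()}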
| 23.895833 | 65 | 0.625109 |
0015aef0840b512b0935c1270f8969bd1f363388 | 5,247 | py | Python | main.py | cody-shearer/mtg-data-load | bb4bee53edf8269903dcdce6d035300f7cf13b5f | ["Apache-2.0"] | null | null | null | main.py | cody-shearer/mtg-data-load | bb4bee53edf8269903dcdce6d035300f7cf13b5f | ["Apache-2.0"] | null | null | null | main.py | cody-shearer/mtg-data-load | bb4bee53edf8269903dcdce6d035300f7cf13b5f | ["Apache-2.0"] | null | null | null |
# This file will run on startup and update card data.
# All Vintage-legal, non-token creature cards will be loaded; for double-faced cards only the front face is used.
# Data is stored in a local MariaDB instance and in the file system.
# All card data is pulled from the Scryfall API Oracle Card data set found here https://scryfall.com/docs/api/bulk-data
# Images come from the Scryfall Imagery API for art_crop found here https://scryfall.com/docs/api/images
import json
import mariadb
import os
import requests
import time
import urllib3
import uuid
from io import BytesIO
from PIL import Image
def main():
cards = []
# stop mysql, pi zero cannot handle parsing ~70 MB of json with mysql running
os.system('/etc/init.d/mysql stop')
for card in get_oracle_cards():
cmc = card['cmc']
legality = card['legalities']['vintage']
if 'image_uris' in card:
art_uri = card['image_uris']['art_crop']
# only select the front face of 2 faced cards
if 'card_faces' in card:
card = card['card_faces'][0]
if 'image_uris' in card:
# dual faced cards sometimes have card art and front face art, front face is preferred
art_uri = card['image_uris']['art_crop']
if 'Creature' in card['type_line'] and legality != 'not_legal':
cards.append([
card['name'],
card['mana_cost'].replace('{', '').replace('}', ''),
card['type_line'],
card['oracle_text'],
card['artist'],
card['power'],
card['toughness'],
cmc,
art_uri])
os.system('/etc/init.d/mysql start')
conn = mariadb.connect(host='127.0.0.1', user='root', password= 'pass', db='mtg')
create_temp_table(conn)
insert_cards(conn, cards)
print('Data loaded to MySql')
del cards
upsert(conn)
print('Data merged')
# make folders as needed
path = '/home/pi/card_images/'
if not os.path.exists(path):
os.mkdir(path)
for i in range(0, 21):
cmc_path = path + str(i)
if not os.path.exists(cmc_path):
os.mkdir(cmc_path)
print('File structure created')
for card in get_cards_missing_images(conn):
# wait as requested by API. needed to prevent IP ban.
time.sleep(0.05)
try:
response = requests.get(card[2])
img = Image.open(BytesIO(response.content))
except:
pass
else:
file_path = '/home/pi/card_images/' + \
str(card[1]) + '/' + str(uuid.uuid1()) + '.png'
img = img.convert(mode='L')
img = img.resize((304, 245))
img.save(file_path)
update_card_image(conn, card[0], file_path)
print(card[0])
print('Done!')
def get_oracle_cards():
http = urllib3.PoolManager()
bulkrequest = http.request('GET', 'https://api.scryfall.com/bulk-data')
bulkdata = json.loads(bulkrequest.data)
oracleurl = [obj for obj in bulkdata['data']
if obj['name'] == 'Oracle Cards'][0]['download_uri']
return json.loads(http.request('GET', oracleurl).data)
def create_temp_table(connection):
cursor = connection.cursor()
sql = (
'create temporary table \
mtg.oracle_staging \
select \
name, mana_cost, type, rules, artist, power, toughness, cmc, art_uri \
from \
mtg.oracle_cards \
limit 0')
cursor.execute(sql)
connection.commit()
def insert_cards(connection, cards):
cursor = connection.cursor()
sql = (
'insert into mtg.oracle_staging \
(name, mana_cost, type, rules, artist, power, toughness, cmc, art_uri) \
values \
(%s, %s, %s, %s, %s, %s, %s, %s, %s)')
cursor.executemany(sql, cards)
connection.commit()
def upsert(connection):
cursor = connection.cursor()
sql = (
'insert into \
mtg.oracle_cards ( \
name, mana_cost, type, rules, artist, power, toughness, cmc, art_uri) \
select \
os.name, os.mana_cost, os.type, os.rules, os.artist, os.power, os.toughness, os.cmc, os.art_uri \
from \
mtg.oracle_staging os \
on duplicate key update \
name = os.name, mana_cost = os.mana_cost, type = os.type, rules = os.rules, artist = os.artist, power = os.power, toughness = os.toughness, cmc = os.cmc, art_uri = os.art_uri')
cursor.execute(sql)
connection.commit()
def get_cards_missing_images(connection):
cursor = connection.cursor()
sql = (
'select \
id, cmc, art_uri, art_file \
from \
mtg.oracle_cards \
where \
art_file is null \
and not art_uri is null')
cursor.execute(sql)
return cursor.fetchall()
def update_card_image(connection, card_id, art_file):
cursor = connection.cursor()
sql = (
'update \
mtg.oracle_cards \
set \
art_file = %s \
where \
id = %s')
cursor.execute(sql, (art_file, card_id))
connection.commit()
if __name__ == '__main__':
main()
| 29.477528 | 188 | 0.581475 |
18ccf2d33ad60893ed9bdaf8bdebc6026ae90390 | 3,568 | py | Python | pymc/tests/test_convergence.py | CamDavidsonPilon/pymc | 26f05e60d0d414e8c3dc59d4d8fabe3ae3cfbcf9 | ["MIT"] | 1 | 2019-03-02T09:28:34.000Z | 2019-03-02T09:28:34.000Z | pymc/tests/test_convergence.py | kforeman/pymc | 5783207904097c7443bfa834c0dbcc68aa38fb76 | ["MIT"] | null | null | null | pymc/tests/test_convergence.py | kforeman/pymc | 5783207904097c7443bfa834c0dbcc68aa38fb76 | ["MIT"] | 3 | 2015-05-11T06:17:00.000Z | 2019-06-17T23:22:46.000Z |
#
#
# Test of convergence diagnostics
#
#
from __future__ import with_statement
from numpy.testing import assert_equal, assert_array_equal, assert_approx_equal, TestCase
import unittest
import numpy as np
import pymc
import pymc.examples.weibull_fit as model
import os
import warnings
import copy
S = pymc.MCMC(model, 'ram')
S.sample(10000, 2000, progress_bar=0)
# a = S.a.trace()
# b = S.b.trace()
# Known data for testing integrated autocorrelation time = 2.28
x = np.array([0.98073604, 0.98073604, 0.98073604, 0.98073604, 0.98073604,
0.41424798, 0.58398493, 0.27391045, 0.27391045, 0.27391045,
0.27391045, 0.27391045, 0.72886149, 0.72886149, 0.72886149,
0.67478139, 0.67478139, 0.67478139, 0.67478139, 0.67478139,
0.67478139, 0.27720909, 0.6026456, 0.6026456, 0.47108579,
0.47108579, 0.47108579, 0.47108579, 0.47108579, 0.47108579,
0.47108579, 0.47108579, 0.47108579, 0.47108579, 0.47108579,
0.47108579, 0.47108579, 0.47108579, 0.47108579, 0.47108579,
0.47108579, 0.47108579, 0.47108579, 0.47108579, 0.47108579,
0.34546653, 0.34546653, 0.5441314, 0.5441314, 0.5441314,
0.5441314, 0.5441314, 0.5441314, 0.5441314, 0.5441314,
0.37344506, 0.37344506, 0.83126209, 0.83126209, 0.3439339,
0.3439339, 0.3439339, 0.34551721, 0.34551721, 0.34551721,
0.44112754, 0.44112754, 0.44112754, 0.55397635, 0.55397635,
0.55397635, 0.55397635, 0.55397635, 0.55397635, 0.55397635,
0.55397635, 0.55397635, 0.55397635, 0.55397635, 0.55397635,
0.55397635, 0.55397635, 0.55397635, 0.55397635, 0.55397635,
0.55397635, 0.36521137, 0.36521137, 0.36521137, 0.36521137,
0.36521137, 0.36521137, 0.36521137, 0.36521137, 0.36521137,
0.36521137, 0.36521137, 0.36521137, 0.36521137, 0.32755692])
DIR = 'testresults'
class test_geweke(TestCase):
def test_simple(self):
scores = pymc.geweke(S, intervals=20)
a_scores = scores['a']
assert_equal(len(a_scores), 20)
# If the model has converged, 95% the scores should lie
# within 2 standard deviations of zero, under standard normal model
assert(sum(np.abs(np.array(a_scores)[:, 1]) > 1.96) < 2)
# Plot diagnostics (if plotting is available)
try:
from pymc.Matplot import geweke_plot as plot
plot(scores, path=DIR, verbose=0)
except ImportError:
pass
class test_gelman_rubin(TestCase):
"""Unit test for Gelman-Rubin diagnostic"""
def test_fail(self):
pass
def test_simple(self):
S2 = copy.copy(S)
S2.sample(10000, 2000, progress_bar=0)
gr = pymc.gelman_rubin(S2)
for i in gr:
assert_approx_equal(gr[i], 1., 2)
class test_iat(TestCase):
def test_simple(self):
iat = pymc.iat(x)
# IAT should be approximately 2.28
assert_approx_equal(iat, 2.28, 2)
if __name__ == "__main__":
original_filters = warnings.filters[:]
warnings.simplefilter("ignore")
try:
import nose
C = nose.config.Config(verbosity=1)
nose.runmodule(config=C)
finally:
warnings.filters = original_filters
# TODO: Restore in 2.2
# with warnings.catch_warnings():
# warnings.simplefilter('ignore', FutureWarning)
# import nose
# C =nose.config.Config(verbosity=1)
# nose.runmodule(config=C)
| 31.575221 | 89 | 0.627803 |
bccd03aee10ff6467b759416d0a4cd969dea3d1d | 3,176 | py | Python | meraki_sdk/models/update_organization_branding_policy_model.py | meraki/meraki-python-sdk | 9894089eb013318243ae48869cc5130eb37f80c0 | ["MIT"] | 37 | 2019-04-24T14:01:33.000Z | 2022-01-28T01:37:21.000Z | meraki_sdk/models/update_organization_branding_policy_model.py | ankita66666666/meraki-python-sdk | 9894089eb013318243ae48869cc5130eb37f80c0 | ["MIT"] | 10 | 2019-07-09T16:35:11.000Z | 2021-12-07T03:47:53.000Z | meraki_sdk/models/update_organization_branding_policy_model.py | ankita66666666/meraki-python-sdk | 9894089eb013318243ae48869cc5130eb37f80c0 | ["MIT"] | 17 | 2019-04-30T23:53:21.000Z | 2022-02-07T22:57:44.000Z |
# -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
import meraki_sdk.models.admin_settings_model
import meraki_sdk.models.help_settings_1_model
class UpdateOrganizationBrandingPolicyModel(object):
"""Implementation of the 'updateOrganizationBrandingPolicy' model.
TODO: type model description here.
Attributes:
name (string): Name of the Dashboard branding policy.
enabled (bool): Boolean indicating whether this policy is enabled.
admin_settings (AdminSettingsModel): Settings for describing which
kinds of admins this policy applies to.
help_settings (HelpSettings1Model): Settings for describing the
modifications to various Help page features. Each property in this
object accepts one of 'default or inherit' (do not modify
functionality), 'hide' (remove the section from Dashboard), or
'show' (always show the section on Dashboard). Some properties
in this object also accept custom HTML used to replace the section
on Dashboard; see the documentation for each property to see
the allowed values.
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"enabled":'enabled',
"admin_settings":'adminSettings',
"help_settings":'helpSettings'
}
def __init__(self,
name=None,
enabled=None,
admin_settings=None,
help_settings=None):
"""Constructor for the UpdateOrganizationBrandingPolicyModel class"""
# Initialize members of the class
self.name = name
self.enabled = enabled
self.admin_settings = admin_settings
self.help_settings = help_settings
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
enabled = dictionary.get('enabled')
admin_settings = meraki_sdk.models.admin_settings_model.AdminSettingsModel.from_dictionary(dictionary.get('adminSettings')) if dictionary.get('adminSettings') else None
help_settings = meraki_sdk.models.help_settings_1_model.HelpSettings1Model.from_dictionary(dictionary.get('helpSettings')) if dictionary.get('helpSettings') else None
# Return an object of this model
return cls(name,
enabled,
admin_settings,
help_settings)
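# Usage sketch (hypothetical payload): UpdateOrganizationBrandingPolicyModel.from_dictionary(
#     {'name': 'My policy', 'enabled': True})
# returns a model whose admin_settings and help_settings are left as None.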
| 36.930233 | 177 | 0.634446 |
29e852a50ccd38ddc1a5ef2502d94131095c12a8 | 567 | py | Python | userbot/plugins/bye.py | n8wachT/telegrrrrrr | ad254bda0710c1a38dae21f84a45cf94031fa66d | ["MIT"] | null | null | null | userbot/plugins/bye.py | n8wachT/telegrrrrrr | ad254bda0710c1a38dae21f84a45cf94031fa66d | ["MIT"] | null | null | null | userbot/plugins/bye.py | n8wachT/telegrrrrrr | ad254bda0710c1a38dae21f84a45cf94031fa66d | ["MIT"] | null | null | null |
# For @UniBorg
# Courtesy @yasirsiddiqui
"""
.bye
"""
from telethon.tl.functions.channels import LeaveChannelRequest
from userbot.utils import admin_cmd
import time
@borg.on(admin_cmd("bye", outgoing=True))
async def leave(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`Jaa Rha Hu Mai Toh Bc Gaand Marao.....!`")
time.sleep(3)
if '-' in str(e.chat_id):
await borg(LeaveChannelRequest(e.chat_id))
else:
await e.edit('`Sir This is Not A Chat`')
| 14.921053 | 73 | 0.594356 |
627793941435a3f084ac4e5115c255a0a002bb47 | 2,050 | py | Python | qiskit/providers/ibmq/job/utils.py | Sahar2/qiskit-ibmq-provider | a7fa886f5b34123bf7bb903840e32b1bf4cc30b5 | ["Apache-2.0"] | 1 | 2020-07-14T20:09:52.000Z | 2020-07-14T20:09:52.000Z | qiskit/providers/ibmq/job/utils.py | Sahar2/qiskit-ibmq-provider | a7fa886f5b34123bf7bb903840e32b1bf4cc30b5 | ["Apache-2.0"] | null | null | null | qiskit/providers/ibmq/job/utils.py | Sahar2/qiskit-ibmq-provider | a7fa886f5b34123bf7bb903840e32b1bf4cc30b5 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Utilities for working with IBM Q Jobs."""
from datetime import datetime, timezone
def current_utc_time():
"""Gets the current time in UTC format.
Returns:
str: current time in UTC format.
"""
    return datetime.utcnow().replace(tzinfo=timezone.utc).isoformat()
def is_job_queued(api_job_status_response):
"""Checks whether a job has been queued or not.
Args:
api_job_status_response (dict): status response of the job.
Returns:
Pair[boolean, int]: a pair indicating if the job is queued and in which
position.
"""
is_queued, position = False, 0
if 'infoQueue' in api_job_status_response:
if 'status' in api_job_status_response['infoQueue']:
queue_status = api_job_status_response['infoQueue']['status']
is_queued = queue_status == 'PENDING_IN_QUEUE'
if 'position' in api_job_status_response['infoQueue']:
position = api_job_status_response['infoQueue']['position']
return is_queued, position
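# Illustrative example (hypothetical response): passing
#   {'infoQueue': {'status': 'PENDING_IN_QUEUE', 'position': 5}}
# returns (True, 5), while a response without an 'infoQueue' section returns (False, 0).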
def build_error_report(results):
"""Build an user-friendly error report for a failed job.
Args:
results (dict): result section of the job response.
Returns:
str: the error report.
"""
error_list = []
for index, result in enumerate(results):
if not result['success']:
error_list.append('Experiment {}: {}'.format(index, result['status']))
error_report = 'The following experiments failed:\n{}'.format('\n'.join(error_list))
return error_report
| 31.538462 | 88 | 0.681463 |
ccf767d357292de4362302b8eb9b50f1b9ba7416 | 22,256 | py | Python | test/functional/test_framework/mininode.py | lihuanghai/bitcoin | 624da15f8c55219f4ca3e0877a17799990299504 | ["MIT"] | 3 | 2021-11-15T14:58:10.000Z | 2021-11-17T09:59:42.000Z | test/functional/test_framework/mininode.py | lihuanghai/bitcoin | 624da15f8c55219f4ca3e0877a17799990299504 | ["MIT"] | 1 | 2017-01-08T20:32:43.000Z | 2017-01-08T20:32:43.000Z | test/functional/test_framework/mininode.py | lihuanghai/bitcoin | 624da15f8c55219f4ca3e0877a17799990299504 | ["MIT"] | 1 | 2019-09-02T00:49:46.000Z | 2019-09-02T00:49:46.000Z |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages"""
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import *
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
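# Message framing handled by _on_data()/_build_message() below:
#   magic (4 bytes) | command (12 bytes, NUL-padded) | payload length (4 bytes, little-endian)
#   | checksum (first 4 bytes of double-SHA256 of the payload) | payload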
class P2PConnection(asyncio.Protocol):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
This class contains no logic for handing the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# The underlying transport of the connection.
# Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
self._transport = None
@property
def is_connected(self):
return self._transport is not None
def peer_connect(self, dstaddr, dstport, net="regtest"):
assert not self.is_connected
self.dstaddr = dstaddr
self.dstport = dstport
# The initial message to send after the connection was made:
self.on_connection_send_msg = None
self.recvbuf = b""
self.network = net
logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
loop = NetworkThread.network_event_loop
conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
return conn_gen
def peer_disconnect(self):
# Connection could have already been closed by other end.
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
# Connection and disconnection methods
def connection_made(self, transport):
"""asyncio callback when a connection is opened."""
assert not self._transport
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None # Never used again
self.on_open()
def connection_lost(self, exc):
"""asyncio callback when a connection is closed."""
if exc:
logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
else:
logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
self._transport = None
self.recvbuf = b""
self.on_close()
# Socket read methods
def data_received(self, t):
"""asyncio callback when data is read from the socket."""
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command not in MESSAGEMAP:
raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
            logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def send_message(self, message):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
if not self.is_connected:
raise IOError('Not connected')
self._log_message("send", message)
tmsg = self._build_message(message)
def maybe_write():
if not self._transport:
return
# Python <3.4.4 does not have is_closing, so we have to check for
# its existence explicitly as long as Bitcoin Core supports all
# Python 3.4 versions.
if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
return
self._transport.write(tmsg)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
# Class utility methods
def _build_message(self, message):
"""Build a serialized P2P message"""
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
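    # For reference, the frame assembled above has this layout (offsets are
    # read directly from the code in _build_message(); listed as a reading aid):
    #   bytes 0-3    network magic (MAGIC_BYTES[self.network])
    #   bytes 4-15   command name, NUL-padded to 12 bytes
    #   bytes 16-19  payload length, little-endian uint32
    #   bytes 20-23  checksum = first 4 bytes of double-SHA256(payload)
    #   bytes 24-    serialized payload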
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a Bitcoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self):
super().__init__()
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.on_connection_send_msg = vt # Will be sent soon after connection_made
return create_conn
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
self.verack_received = True
def on_version(self, message):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
"""Waits for a getdata message.
        Receiving any getdata message will satisfy the predicate. The last_message["getdata"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block/tx has been requested."""
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
"""Waits for a getheaders message.
        Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested."""
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
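# --- Illustrative sketch (an added example, not part of the original file) ---
# The P2PInterface docstring says test cases should subclass it and override
# the on_* callbacks. A minimal, hypothetical subclass that records every
# announced inv hash before falling back to the default getdata behaviour:
class ExampleInvRecorder(P2PInterface):
    """Hypothetical subclass used purely as a usage illustration."""

    def __init__(self):
        super().__init__()
        self.seen_inv_hashes = []  # example-only attribute

    def on_inv(self, message):
        # record the announced hashes, then keep the stock reply logic
        self.seen_inv_hashes.extend(i.hash for i in message.inv)
        super().on_inv(message)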
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
network_event_loop = None
def __init__(self):
super().__init__(name="NetworkThread")
# There is only one event loop and no more than one thread must be created
assert not self.network_event_loop
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
"""Start the network thread."""
self.network_event_loop.run_forever()
def close(self, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
def __init__(self):
super().__init__()
self.reject_code_received = None
self.reject_reason_received = None
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
"""Search back through our block store for the locator, and reply with a headers message if found."""
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
maxheaders = 2000
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-maxheaders - 1:-1]
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def on_reject(self, message):
"""Store reject reason and code for testing."""
self.reject_code_received = message.code
self.reject_reason_received = message.reason
def send_blocks_and_test(self, blocks, rpc, success=True, request_block=True, reject_code=None, reject_reason=None, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if request_block is True: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_code and reject_reason are set: assert that the correct reject message is received"""
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
self.send_message(msg_headers([CBlockHeader(blocks[-1])]))
if request_block:
wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
if success:
wait_until(lambda: rpc.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert rpc.getbestblockhash() != blocks[-1].hash
if reject_code is not None:
wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
if reject_reason is not None:
wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
def send_txs_and_test(self, txs, rpc, success=True, expect_disconnect=False, reject_code=None, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_code and reject_reason are set: assert that the correct reject message is received."""
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for tx in txs:
self.tx_store[tx.sha256] = tx
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = rpc.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
if reject_code is not None:
wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
if reject_reason is not None:
wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
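# Illustrative call pattern for P2PDataStore (a hypothetical test snippet; the
# node handle and the block/tx objects below are assumptions, not defined here):
#
#   p2p = node.add_p2p_connection(P2PDataStore())
#   p2p.send_blocks_and_test([block], node.rpc, success=True)
#   p2p.send_txs_and_test([tx], node.rpc, success=False, reject_reason=b'insufficient fee')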
| 40.539162 | 182 | 0.649937 |
b926d175e8b4cf6ebcae58efc449be5ad52207b9 | 18,648 | py | Python | nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py | SnabbCo/nova | d156d7fdf241569da2c27ae02ec88e6ef448f7e2 | [
"Apache-2.0"
] | 2 | 2016-04-19T08:20:39.000Z | 2021-10-03T16:00:37.000Z | nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py | SnabbCo/nova | d156d7fdf241569da2c27ae02ec88e6ef448f7e2 | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py | SnabbCo/nova | d156d7fdf241569da2c27ae02ec88e6ef448f7e2 | [
"Apache-2.0"
] | 1 | 2020-07-24T06:34:03.000Z | 2020-07-24T06:34:03.000Z | # Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.plugins.v3 import keypairs
from nova import db
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.objects import test_keypair
QUOTAS = quota.QUOTAS
keypair_data = {
'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
}
def fake_keypair(name):
return dict(test_keypair.fake_keypair,
name=name, **keypair_data)
def db_key_pair_get_all_by_user(self, user_id):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
return fake_keypair(name=keypair['name'])
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_create_duplicate(context, keypair):
raise exception.KeyPairExists(key_name=keypair.get('name', ''))
class KeypairsTest(test.TestCase):
def setUp(self):
super(KeypairsTest, self).setUp()
self.Controller = keypairs.Controller()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Keypairs'])
self.app = fakes.wsgi_app_v3(init_only=('keypairs', 'servers'))
def test_keypair_list(self):
req = webob.Request.blank('/v3/keypairs')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 201)
res_dict = jsonutils.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
def test_keypair_create_without_keypair(self):
body = {'foo': None}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
jsonutils.loads(res.body)
def test_keypair_create_without_name(self):
body = {'keypair': {'public_key': 'public key'}}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual("Invalid input for field/attribute keypair. "
"Value: {u'public_key': u'public key'}. "
"'name' is a required property",
res_dict['badRequest']['message'])
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual("Invalid input for field/attribute name. "
"Value: . u'' is too short",
res_dict['badRequest']['message'])
def test_keypair_create_with_name_too_long(self):
name = 'a' * 256
body = {
'keypair': {
'name': name
}
}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
expected_message = "Invalid input for field/attribute name. "\
"Value: %s. u'%s' is too long" % (name, name)
self.assertEqual(expected_message, res_dict['badRequest']['message'])
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
'name': 'test/keypair'
}
}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
        self.assertEqual(res.status_int, 400)
        res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Invalid input for field/attribute name. Value: test/keypair. "
"u'test/keypair' does not match '^(?! )[a-zA-Z0-9. _-]+(?<! )$'",
res_dict['badRequest']['message'])
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 201)
        # FIXME(ja): should we check that public_key was sent to create?
res_dict = jsonutils.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertNotIn('private_key', res_dict['keypair'])
def test_keypair_import_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 413)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
res_dict['overLimit']['message'])
def test_keypair_create_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
},
}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 413)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
res_dict['overLimit']['message'])
def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
body = {'keypair': {'name': 'create_duplicate'}}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 409)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Key pair 'create_duplicate' already exists.",
res_dict['conflictingRequest']['message'])
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
'Keypair data is invalid: failed to generate fingerprint',
res_dict['badRequest']['message'])
def test_keypair_delete(self):
req = webob.Request.blank('/v3/keypairs/FAKE')
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 204)
def test_keypair_get_keypair_not_found(self):
req = webob.Request.blank('/v3/keypairs/DOESNOTEXIST')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get",
db_key_pair_get_not_found)
req = webob.Request.blank('/v3/keypairs/WHAT')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_keypair_show(self):
def _db_key_pair_get(context, user_id, name):
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY')
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
req = webob.Request.blank('/v3/keypairs/FAKE')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
res_dict = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual('foo', res_dict['keypair']['name'])
self.assertEqual('XXX', res_dict['keypair']['public_key'])
self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
def test_keypair_show_not_found(self):
def _db_key_pair_get(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
req = webob.Request.blank('/v3/keypairs/FAKE')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_show_server(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get())
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get())
req = webob.Request.blank('/v3/servers/1')
req.headers['Content-Type'] = 'application/json'
response = req.get_response(self.app)
self.assertEqual(response.status_int, 200)
res_dict = jsonutils.loads(response.body)
self.assertIn('key_name', res_dict['server'])
self.assertEqual(res_dict['server']['key_name'], '')
def test_detail_servers(self):
self.stubs.Set(db, 'instance_get_all_by_filters',
fakes.fake_instance_get_all_by_filters())
self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
req = webob.Request.blank('/v3/servers/detail')
res = req.get_response(self.app)
server_dicts = jsonutils.loads(res.body)['servers']
self.assertEqual(len(server_dicts), 5)
for server_dict in server_dicts:
self.assertIn('key_name', server_dict)
self.assertEqual(server_dict['key_name'], '')
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
req = webob.Request.blank('/v3/keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
jsonutils.loads(res.body)
self.assertEqual(res.status_int, 400)
class KeypairPolicyTest(test.TestCase):
def setUp(self):
super(KeypairPolicyTest, self).setUp()
self.KeyPairController = keypairs.KeypairController()
def _db_key_pair_get(context, user_id, name):
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY')
self.stubs.Set(db, "key_pair_get",
_db_key_pair_get)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
def test_keypair_list_fail_policy(self):
rules = {'compute_extension:v3:keypairs:index':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequestV3.blank('/keypairs')
self.assertRaises(exception.Forbidden,
self.KeyPairController.index,
req)
def test_keypair_list_pass_policy(self):
rules = {'compute_extension:v3:keypairs:index':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequestV3.blank('/keypairs')
res = self.KeyPairController.index(req)
self.assertIn('keypairs', res)
def test_keypair_show_fail_policy(self):
rules = {'compute_extension:v3:keypairs:show':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequestV3.blank('/keypairs/FAKE')
self.assertRaises(exception.Forbidden,
self.KeyPairController.show,
req, 'FAKE')
def test_keypair_show_pass_policy(self):
rules = {'compute_extension:v3:keypairs:show':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequestV3.blank('/keypairs/FAKE')
res = self.KeyPairController.show(req, 'FAKE')
self.assertIn('keypair', res)
def test_keypair_create_fail_policy(self):
rules = {'compute_extension:v3:keypairs:create':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequestV3.blank('/keypairs')
req.method = 'POST'
self.assertRaises(exception.Forbidden,
self.KeyPairController.create,
req, body={'keypair': {'name': 'create_test'}})
def test_keypair_create_pass_policy(self):
body = {'keypair': {'name': 'create_test'}}
rules = {'compute_extension:v3:keypairs:create':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequestV3.blank('/keypairs')
req.method = 'POST'
res = self.KeyPairController.create(req, body=body)
self.assertIn('keypair', res)
def test_keypair_delete_fail_policy(self):
rules = {'compute_extension:v3:keypairs:delete':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequestV3.blank('/keypairs/FAKE')
req.method = 'DELETE'
self.assertRaises(exception.Forbidden,
self.KeyPairController.delete,
req, 'FAKE')
def test_keypair_delete_pass_policy(self):
rules = {'compute_extension:v3:keypairs:delete':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequestV3.blank('/keypairs/FAKE')
req.method = 'DELETE'
self.assertIsNone(self.KeyPairController.delete(req, 'FAKE'))
| 39.424947 | 79 | 0.61036 |
20d8f1aed3be39447ba8899b5f580669c3afed1f | 76 | py | Python | Ygdra.Python/ygdra/__init__.py | bsherwin/ProjectY | 1fdbc595030c006b252530e685a6d4fd313a13c2 | [
"MIT"
] | null | null | null | Ygdra.Python/ygdra/__init__.py | bsherwin/ProjectY | 1fdbc595030c006b252530e685a6d4fd313a13c2 | [
"MIT"
] | null | null | null | Ygdra.Python/ygdra/__init__.py | bsherwin/ProjectY | 1fdbc595030c006b252530e685a6d4fd313a13c2 | [
"MIT"
] | null | null | null | from .ygdra import *
from .dataprofile import *
from .lambdaYgdra import *
| 15.2 | 26 | 0.75 |
1a1a98422eca65eed3ccbc8120c323eebc7ef9c4 | 1,347 | py | Python | wyrd/constrained_types/primitives.py | meadsteve/constrained_types | 2a3b87a0b14be70ee2de963acf0eebf302dfe1d9 | [
"MIT"
] | 1 | 2021-05-03T08:53:33.000Z | 2021-05-03T08:53:33.000Z | wyrd/constrained_types/primitives.py | meadsteve/constrained_types | 2a3b87a0b14be70ee2de963acf0eebf302dfe1d9 | [
"MIT"
] | 16 | 2020-10-11T07:46:39.000Z | 2020-10-25T13:29:05.000Z | wyrd/constrained_types/primitives.py | meadsteve/constrained_types | 2a3b87a0b14be70ee2de963acf0eebf302dfe1d9 | [
"MIT"
] | null | null | null | from typing import Any, ClassVar, List
from .core import Constrained, Constraint
from .helpers import validate
class ConstrainedInt(int, Constrained[int]):
_constraints: ClassVar[List[Constraint]] = []
def __init__(self, value: Any):
super(int, self).__init__()
self._validate(self)
@classmethod
def _validate(cls, value):
validate(value, cls._constraints)
# For integration with pydantic
@classmethod
def __get_validators__(cls):
yield lambda v: cls(v)
class ConstrainedString(str, Constrained[str]):
_constraints: ClassVar[List[Constraint]] = []
def __init__(self, value: Any):
super(str, self).__init__()
self._validate(self)
@classmethod
def _validate(cls, value):
validate(value, cls._constraints)
# For integration with pydantic
@classmethod
def __get_validators__(cls):
yield lambda v: cls(v)
class ConstrainedFloat(float, Constrained[float]):
_constraints: ClassVar[List[Constraint]] = []
def __init__(self, value: Any):
super(float, self).__init__()
self._validate(self)
@classmethod
def _validate(cls, value):
validate(value, cls._constraints)
# For integration with pydantic
@classmethod
def __get_validators__(cls):
yield lambda v: cls(v)
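# --- Usage sketch (an added example; the exact Constraint helpers live
# elsewhere in this package, so the constraint object below is an assumption) --
#
#   class PositiveInt(ConstrainedInt):
#       _constraints = [<a Constraint that checks value > 0>]
#
#   PositiveInt(3)    # validates in __init__ and otherwise behaves like an int
#   PositiveInt(-1)   # expected to raise via validate()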
| 24.053571 | 50 | 0.668151 |
641560f6de282f94cacc78333ab05b94c4181fa6 | 2,110 | py | Python | package/spack-savanna/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-savanna/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-savanna/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
from distutils.dir_util import copy_tree
class Savanna(MakefilePackage):
"""CODARcode Savanna runtime framework for high performance,
workflow management using Swift/T and ADIOS.
"""
homepage = "https://github.com/CODARcode/savanna"
url = "https://github.com/CODARcode/savanna/archive/v0.5.tar.gz"
version('develop', git='https://github.com/CODARcode/savanna.git',
branch='master', submodules=True)
version('0.5', git='https://github.com/CODARcode/savanna.git',
tag='0.5', submodules=True)
variant('tau', default=False, description='Enable TAU profiling support')
depends_on('mpi')
depends_on('stc')
depends_on('adios +fortran +zlib +sz +zfp +staging')
depends_on('mpix-launch-swift')
depends_on('tau', when='+tau')
def install(self, spec, prefix):
copy_tree('.', prefix)
| 40.576923 | 78 | 0.671564 |
9c6ea378f78734db2a600c111810107590bf502a | 4,129 | py | Python | setup.py | fatelei/sqlfluff | c5bdd2203c75df7b98ed233a9b36e5628cd43c90 | [
"MIT"
] | 3,024 | 2020-10-01T11:03:51.000Z | 2022-03-31T16:42:00.000Z | setup.py | fatelei/sqlfluff | c5bdd2203c75df7b98ed233a9b36e5628cd43c90 | [
"MIT"
] | 2,395 | 2020-09-30T12:59:21.000Z | 2022-03-31T22:05:29.000Z | setup.py | fatelei/sqlfluff | c5bdd2203c75df7b98ed233a9b36e5628cd43c90 | [
"MIT"
] | 246 | 2020-10-02T17:08:03.000Z | 2022-03-30T17:43:51.000Z | #!/usr/bin/env python
"""The script for setting up sqlfluff."""
import sys
if sys.version_info[0] < 3:
raise Exception("SQLFluff does not support Python 2. Please upgrade to Python 3.")
import configparser
from os.path import dirname
from os.path import join
from setuptools import find_packages, setup
# Get the global config info as currently stated
# (we use the config file to avoid actually loading any python here)
config = configparser.ConfigParser()
config.read(["src/sqlfluff/config.ini"])
version = config.get("sqlfluff", "version")
def read(*names, **kwargs):
"""Read a file and return the contents as a string."""
return open(
join(dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")
).read()
setup(
name="sqlfluff",
version=version,
license="MIT License",
description="The SQL Linter for Humans",
long_description=read("README.md"),
# Make sure pypi is expecting markdown!
long_description_content_type="text/markdown",
author="Alan Cruickshank",
author_email="alan@designingoverload.com",
url="https://github.com/sqlfluff/sqlfluff",
python_requires=">=3.6",
keywords=[
"sqlfluff",
"sql",
"linter",
"formatter",
"bigquery",
"exasol",
"hive",
"mysql",
"postgres",
"redshift",
"snowflake",
"sqlite",
"teradata",
"tsql",
"dbt",
],
project_urls={
"Homepage": "https://www.sqlfluff.com",
"Documentation": "https://docs.sqlfluff.com",
"Changes": "https://github.com/sqlfluff/sqlfluff/blob/main/CHANGELOG.md",
"Source": "https://github.com/sqlfluff/sqlfluff",
"Issue Tracker": "https://github.com/sqlfluff/sqlfluff/issues",
"Twitter": "https://twitter.com/SQLFluff",
"Chat": "https://github.com/sqlfluff/sqlfluff#sqlfluff-on-slack",
},
packages=find_packages(where="src"),
package_dir={"": "src"},
include_package_data=True,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
# 'Development Status :: 5 - Production/Stable',
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Utilities",
"Topic :: Software Development :: Quality Assurance",
],
install_requires=[
# Core
"click>=7.1",
"colorama>=0.3",
"configparser",
"oyaml",
"Jinja2",
# Used for diffcover plugin
"diff-cover>=2.5.0",
# Used for .sqlfluffignore
"pathspec",
# Used for finding os-specific application config dirs
"appdirs",
# Cached property for performance gains
"cached-property",
# dataclasses backport for python 3.6
"dataclasses; python_version < '3.7'",
# better type hints for older python versions
"typing_extensions",
# We provide a testing library for plugins in sqlfluff.testing
"pytest",
# For parsing pyproject.toml
"toml",
# For returning exceptions from multiprocessing.Pool.map()
"tblib",
],
entry_points={
"console_scripts": [
"sqlfluff = sqlfluff.cli.commands:cli",
],
"diff_cover": ["sqlfluff = sqlfluff.diff_quality_plugin"],
"sqlfluff": ["sqlfluff = sqlfluff.core.plugin.lib"],
},
)
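# Illustrative post-install usage of the console script declared in
# entry_points above (the lint subcommand and flag shown are assumptions here):
#
#   pip install sqlfluff
#   sqlfluff lint path/to/query.sql --dialect ansi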
| 32.007752 | 90 | 0.604747 |
675a5c3108ff79c3c584239320584e006708a20d | 273 | py | Python | setup.py | searchivarius/DeepNLP-models-Pytorch | 91a40f767a6ac0ba7004f1bc015ad8a5debf4203 | [
"MIT"
] | 3,067 | 2017-11-10T01:25:25.000Z | 2022-03-31T08:41:25.000Z | setup.py | fanlidublin/DeepNLP-models-Pytorch | ceeb4221b176790229cd20c6ca4c05f625bdf02e | [
"MIT"
] | 12 | 2017-11-17T08:33:39.000Z | 2021-04-25T14:43:30.000Z | setup.py | fanlidublin/DeepNLP-models-Pytorch | ceeb4221b176790229cd20c6ca4c05f625bdf02e | [
"MIT"
] | 729 | 2017-11-10T01:35:33.000Z | 2022-03-31T08:41:33.000Z | from distutils.core import setup
from Cython.Build import cythonize
from Cython.Distutils import build_ext
from distutils.extension import Extension
setup(name="Corpus",cmdclass = {'build_ext': build_ext},ext_modules = [Extension("Corpus", ["Corpus.pyx"],language='c++')]) | 45.5 | 123 | 0.783883 |
30d7779dec522ffda896f3a19f77563b8a92f827 | 6,320 | py | Python | presqt/targets/zenodo/functions/fetch.py | craig-willis/presqt | b920527bf8998696f516d65a50f0a5c3862c4558 | [
"Apache-2.0"
] | null | null | null | presqt/targets/zenodo/functions/fetch.py | craig-willis/presqt | b920527bf8998696f516d65a50f0a5c3862c4558 | [
"Apache-2.0"
] | null | null | null | presqt/targets/zenodo/functions/fetch.py | craig-willis/presqt | b920527bf8998696f516d65a50f0a5c3862c4558 | [
"Apache-2.0"
] | null | null | null | import requests
from rest_framework import status
from presqt.targets.zenodo.utilities import (
zenodo_validation_check, zenodo_fetch_resources_helper, zenodo_fetch_resource_helper)
from presqt.utilities import PresQTValidationError, PresQTResponseException
def zenodo_fetch_resources(token, search_parameter):
"""
    Fetch all of the user's repos from Zenodo.
Parameters
----------
token : str
User's Zenodo token
search_parameter : dict
The search parameter passed to the API View
Gets passed formatted as {'title': 'search_info'}
Returns
-------
List of dictionary objects that represent Zenodo resources.
Dictionary must be in the following format
{
"kind": "container",
"kind_name": "folder",
"id": "12345",
"container": "None",
"title": "Folder Name"
}
"""
try:
auth_parameter = zenodo_validation_check(token)
except PresQTValidationError:
raise PresQTValidationError("Token is invalid. Response returned a 401 status code.",
status.HTTP_401_UNAUTHORIZED)
    # Let's build the resources
if search_parameter:
if 'title' in search_parameter:
search_parameters = search_parameter['title'].replace(' ', '+')
base_url = 'https://zenodo.org/api/records?q=title:"{}"&sort=most_recent'.format(
search_parameters)
zenodo_projects = requests.get(base_url, params=auth_parameter).json()['hits']['hits']
is_record = True
elif 'id' in search_parameter:
base_url = 'https://zenodo.org/api/records?q=conceptrecid:{}'.format(search_parameter['id'])
zenodo_projects = requests.get(base_url, params=auth_parameter).json()['hits']['hits']
is_record = True
else:
base_url = "https://zenodo.org/api/deposit/depositions"
zenodo_projects = requests.get(base_url, params=auth_parameter).json()
is_record = False
resources = zenodo_fetch_resources_helper(zenodo_projects, auth_parameter, is_record)
return resources
def zenodo_fetch_resource(token, resource_id):
"""
Fetch the Zenodo resource matching the resource_id given.
Parameters
----------
token : str
User's Zenodo token
resource_id : str
ID of the resource requested
Returns
-------
A dictionary object that represents the Zenodo resource.
Dictionary must be in the following format:
{
"kind": "container",
"kind_name": "repo",
"id": "12345",
"title": "23296359282_934200ec59_o.jpg",
"date_created": "2019-05-13T14:54:17.129170Z",
"date_modified": "2019-05-13T14:54:17.129170Z",
"hashes": {
"md5": "aaca7ef067dcab7cb8d79c36243823e4",
},
"extra": {
"any": extra,
"values": here
}
}
"""
try:
auth_parameter = zenodo_validation_check(token)
except PresQTValidationError:
raise PresQTValidationError("Token is invalid. Response returned a 401 status code.",
status.HTTP_401_UNAUTHORIZED)
# Let's first try to get the record with this id.
if len(resource_id) <= 7:
base_url = "https://zenodo.org/api/records/{}".format(resource_id)
zenodo_project = requests.get(base_url, params=auth_parameter)
if zenodo_project.status_code == 200:
# We found the record, pass the project to our function.
resource = zenodo_fetch_resource_helper(zenodo_project.json(), resource_id, True)
else:
# We need to get the resource from the depositions
base_url = "https://zenodo.org/api/deposit/depositions/{}".format(resource_id)
zenodo_project = requests.get(base_url, params=auth_parameter)
if zenodo_project.status_code != 200:
raise PresQTResponseException("The resource could not be found by the requesting user.",
status.HTTP_404_NOT_FOUND)
else:
resource = zenodo_fetch_resource_helper(
zenodo_project.json(), resource_id, False, False)
else:
# We got ourselves a file.
base_url = "https://zenodo.org/api/files/{}".format(resource_id)
zenodo_project = requests.get(base_url, params=auth_parameter)
if zenodo_project.status_code == 200:
# Contents returns a list of the single file
resource = zenodo_fetch_resource_helper(
zenodo_project.json()['contents'][0], resource_id, True, True)
else:
# We need to loop through the users depositions and see if the file is there.
base_url = 'https://zenodo.org/api/deposit/depositions'
zenodo_projects = requests.get(base_url, params=auth_parameter).json()
for entry in zenodo_projects:
project_files = requests.get(entry['links']['self'], params=auth_parameter).json()
for entry in project_files['files']:
if entry['id'] == resource_id:
resource = {
"kind": "item",
"kind_name": "file",
"id": resource_id,
"title": entry['filename'],
"date_created": None,
"date_modified": None,
"hashes": {
"md5": entry['checksum']
},
"extra": {}}
# We found the file, break out of file loop
break
# If the file wasn't found, we want to continue looping through the other projects.
else:
continue
# File has been found, break out of project loop
break
# File not found, raise exception
else:
raise PresQTResponseException("The resource could not be found by the requesting user.",
status.HTTP_404_NOT_FOUND)
return resource
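# Illustrative call pattern (the token and IDs below are placeholder assumptions):
#
#   resources = zenodo_fetch_resources("<zenodo-token>", {"title": "climate data"})
#   resource = zenodo_fetch_resource("<zenodo-token>", "3723281")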
| 40 | 104 | 0.580854 |
8e7c9eba66bcb1d687d080e938aaf337453f49d1 | 2,655 | py | Python | tests/model/model_constructor_test.py | nickgaya/bravado-core | 16e752963bfceb4adfa43724085bc4127eefcd59 | [
"BSD-3-Clause"
] | 122 | 2015-04-22T17:31:18.000Z | 2021-11-08T10:29:57.000Z | tests/model/model_constructor_test.py | nickgaya/bravado-core | 16e752963bfceb4adfa43724085bc4127eefcd59 | [
"BSD-3-Clause"
] | 364 | 2015-04-10T22:19:23.000Z | 2022-02-25T08:55:10.000Z | tests/model/model_constructor_test.py | nickgaya/bravado-core | 16e752963bfceb4adfa43724085bc4127eefcd59 | [
"BSD-3-Clause"
] | 118 | 2015-04-20T15:11:53.000Z | 2021-12-09T10:03:34.000Z | # -*- coding: utf-8 -*-
import pytest
from bravado_core.schema import collapsed_properties
def test_simple(user_type, user_kwargs):
user = user_type(**user_kwargs)
assert user.firstName == 'Darwin'
assert user.userStatus == 9
assert user.id == 999
assert user.lastName is None
assert user.email is None
assert user.password is None
def test_init_from_dict(user_type, user_kwargs):
user = user_type._from_dict(user_kwargs)
assert user.firstName == 'Darwin'
assert user.userStatus == 9
assert user.id == 999
assert user.lastName is None
assert user.email is None
assert user.password is None
def test_empty_kwargs(user_type):
user = user_type()
assert user.firstName is None
assert user.userStatus is None
assert user.id is None
assert user.lastName is None
assert user.email is None
assert user.password is None
def test_additionalProperties_defaults_to_true_when_not_present(user_type, user_kwargs):
# verify exra kwargs are attached to the model as attributes when
# additionalProperties is not present
user_kwargs['foo'] = 'bar'
user = user_type(**user_kwargs)
assert user.foo == 'bar'
assert 'foo' in dir(user)
def test_additionalProperties_true(definitions_spec, user_type, user_kwargs):
    # verify extra kwargs are attached to the model as attributes when
# additionalProperties is True
user_type._model_spec['additionalProperties'] = True
user_kwargs['foo'] = 'bar' # additional prop
user = user_type(**user_kwargs)
assert user.foo == 'bar'
assert 'foo' in dir(user)
assert set(user) == set(definitions_spec['User']['properties'].keys()).union({'foo'})
def test_additionalProperties_false(user_type, user_kwargs):
    # verify extra kwargs are caught during model construction when
# additionalProperties is False
user_type._model_spec['additionalProperties'] = False
user_kwargs['foo'] = 'bar' # additional prop
with pytest.raises(AttributeError) as excinfo:
user_type(**user_kwargs)
assert "does not have attributes for: ['foo']" in str(excinfo.value)
def test_allOf(cat_swagger_spec, cat_type, cat_kwargs):
cat = cat_type(**cat_kwargs)
assert cat.id == 12
assert cat.category == {'id': 42, 'name': 'Feline'}
assert cat.name == 'Oskar'
assert cat.photoUrls == ['example.com/img1', 'example.com/img2']
assert cat.tags == [{'id': 1, 'name': 'cute'}]
assert cat.neutered is True
assert set(cat) == set(
collapsed_properties(
cat_swagger_spec.spec_dict['definitions']['Cat'], cat_swagger_spec,
).keys(),
)
| 33.1875 | 89 | 0.700565 |
6909bf3508e24903ccbc8d53cbe01dd9b0e6a191 | 70 | py | Python | main.py | deut-erium/pyfractal | d62d1d3e8e34924c4272b171d82d62f47a1d1745 | [
"MIT"
] | 12 | 2020-05-30T13:53:11.000Z | 2021-08-17T04:56:01.000Z | main.py | deut-erium/pyfractal | d62d1d3e8e34924c4272b171d82d62f47a1d1745 | [
"MIT"
] | 1 | 2020-05-30T10:56:15.000Z | 2020-05-30T13:55:05.000Z | main.py | deut-erium/pyfractal | d62d1d3e8e34924c4272b171d82d62f47a1d1745 | [
"MIT"
] | null | null | null | from pyfractal import GUI
if __name__ == '__main__':
GUI().run()
| 14 | 26 | 0.657143 |
b64b8f242d6dac3e1d306f9cb946681cd37d5b3c | 3,445 | py | Python | src/val_lfw.py | yangfly/insightface | 82bc323e0362580a4b5a793393c2dca05551270b | [
"MIT"
] | 18 | 2019-05-14T06:41:46.000Z | 2021-12-24T02:15:42.000Z | repos/insightface/src/val_lfw.py | batermj/DeepVideoAnalytics | daad116b87370fce1799b7948af73b92f617cf41 | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | null | null | null | repos/insightface/src/val_lfw.py | batermj/DeepVideoAnalytics | daad116b87370fce1799b7948af73b92f617cf41 | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | 3 | 2018-03-29T08:18:58.000Z | 2020-06-29T11:43:50.000Z | from __future__ import division
import numpy as np
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'eval'))
import lfw
import sklearn
from sklearn.decomposition import PCA
lfw_dir = '/raid5data/dplearn/lfw_mtcnn2'
lfw_pairs = lfw.read_pairs(os.path.join(lfw_dir, 'pairs.txt'))
lfw_paths, issame_list = lfw.get_paths(lfw_dir, lfw_pairs, 'jpg')
model_dir = '../model'
models = ['sphereface_p0_20-lfw-0006.npy']
models = ['sphereface-64-p0_0_96_95_0-lfw-0001.npy']
models = ['sphereface_p0_20-lfw-0006.npy', 'sphereface-64-p0_0_96_95_0-lfw-0001.npy']
models = ['sphereface-20-p0_0_96_112_0-lfw-0022.npy','sphereface-20-p0_0_96_95_0-lfw-0021.npy', 'sphereface-20-p0_40_96_112_0-lfw-0022.npy']
models = ['sphereface-20-p0_0_96_112_0-lfw-0022.npy','sphereface-20-p0_0_96_95_0-lfw-0021.npy', 'sphereface-36-p0_0_96_112_0-lfw-0022.npy']
models = ['sphereface-20-p0_0_96_112_0-lfw-0022.npy','sphereface-20-p0_0_96_95_0-lfw-0021.npy']
models = ['sphereface-20-p0_0_96_112_0-lfw-0022.npy','sphereface-20-p0_0_96_95_0-lfw-0021.npy', 'sphereface-36-p0_0_96_112_0-lfw-0022.npy']
models = ['sphereface-20-p0_0_96_112_0-lfw-0022.npy','sphereface-20-p0_0_96_95_0-lfw-0021.npy', 'sphereface-36-p0_0_96_95_0-lfw-0021.npy']
models = [
#'sphereface-20-p0_0_96_112_0-lfw-0022.npy',
#'sphereface-20-p0_0_96_95_0-lfw-0021.npy',
#'sphereface-20-p0_0_80_95_0-lfw-0021.npy',
#'sphereface-36-p0_0_96_95_0-lfw-0021.npy',
'sphereface-s60-p0_0_96_112_0-lfw-0031.npy',
'sphereface-s60-p0_0_96_95_0-lfw-0021.npy',
'sphereface2-s60-p0_0_96_112_0-lfw-0021.npy',
'sphereface3-s60-p0_0_96_95_0-lfw-0023.npy',
#'sphereface-s60-p0_0_80_95_0-lfw-0025.npy',
#'sphereface-s60-p16_0_96_112_0-lfw-0023.npy',
#'spherefacec-s60-p0_0_96_112_0-lfw-0021.npy',
]
models = [
'../model31/sphere-m51-p0_0_96_112_0-lfw-0083.npy',
'../model/softmax-m53-p0_0_96_112_0-lfw-0026.npy',
#'../model32/sphere-m30-p0_0_96_112_0-lfw-0092.npy',
]
#models = models[0:3]
concat = True
pca = False
weights = None
#weights = [0.5, 1.0, 0.5]
F = None
ii = 0
for m in models:
model = m
#model = os.path.join(model_dir, m)
X = np.load(model)
X1 = X[0:(X.shape[0]//2),:]
X2 = X[(X.shape[0]//2):,:]
print(X.shape, X1.shape, X2.shape)
#X1 = sklearn.preprocessing.normalize(X1)
#X2 = sklearn.preprocessing.normalize(X2)
XX = X1+X2
XX = sklearn.preprocessing.normalize(XX)
if weights is not None:
weight = weights[ii]
XX *= weight
if F is None:
F = XX
else:
if concat:
F = np.concatenate((F,XX), axis=1)
else:
F += XX
ii+=1
#if concat:
# F = np.concatenate((F,X2), axis=1)
#else:
# F += X2
print(F.shape)
npca = 0
if concat and pca:
#F = sklearn.preprocessing.normalize(F)
npca = 180
#pca = PCA(n_components=512)
#F = pca.fit_transform(F)
  for npca in range(512,513,1):
_, _, accuracy, val, val_std, far = lfw.evaluate(F, issame_list, nrof_folds=10, pca=npca)
print('[%d]Accuracy: %1.5f+-%1.5f' % (npca, np.mean(accuracy), np.std(accuracy)))
else:
F = sklearn.preprocessing.normalize(F)
_, _, accuracy, val, val_std, far = lfw.evaluate(F, issame_list, nrof_folds=10, pca=npca)
print('[%d]Accuracy: %1.5f+-%1.5f' % (0, np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
| 37.043011 | 140 | 0.67402 |
a5c008801b6907bd7a5f886c5256330ddff1ecd3 | 6,604 | py | Python | hyppo/independence/kmerf.py | zdbzdb1212/hyppo | 5092beedec0a0c13ffa69f7a77f4ee30f3294256 | [
"MIT"
] | 1 | 2021-12-14T10:32:18.000Z | 2021-12-14T10:32:18.000Z | hyppo/independence/kmerf.py | zdbzdb1212/hyppo | 5092beedec0a0c13ffa69f7a77f4ee30f3294256 | [
"MIT"
] | null | null | null | hyppo/independence/kmerf.py | zdbzdb1212/hyppo | 5092beedec0a0c13ffa69f7a77f4ee30f3294256 | [
"MIT"
] | 2 | 2021-11-03T19:34:39.000Z | 2021-11-30T19:28:40.000Z | from typing import NamedTuple
import numpy as np
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.metrics import pairwise_distances
from ._utils import _CheckInputs, sim_matrix
from .base import IndependenceTest
from .dcorr import _dcorr
FOREST_TYPES = {
"classifier": RandomForestClassifier,
"regressor": RandomForestRegressor,
}
class KMERFTestOutput(NamedTuple):
stat: float
pvalue: float
kmerf_dict: dict
class KMERF(IndependenceTest):
r"""
Kernel Mean Embedding Random Forest (KMERF) test statistic and p-value.
The KMERF test statistic is a kernel method for calculating independence by using
a random forest induced similarity matrix as an input, and has been shown to have
especially high gains in finite sample testing power in high dimensional settings
:footcite:p:`shenLearningInterpretableCharacteristic2020`.
Parameters
----------
forest : "regressor", "classifier", default: "regressor"
Type of forest used when running the independence test. If the `y` input in
``test`` is categorial, use the "classifier" keyword.
ntrees : int, default: 500
The number of trees used in the random forest.
**kwargs
Additional arguments used for the forest (see
:class:`sklearn.ensemble.RandomForestClassifier` or
:class:`sklearn.ensemble.RandomForestRegressor`)
Notes
-----
A description of KMERF in greater detail can be found in
:footcite:p:`shenLearningInterpretableCharacteristic2020`. It is computed
using the following steps:
Let :math:`x` and :math:`y` be :math:`(n, p)` and :math:`(n, 1)` samples of random
variables
:math:`X` and :math:`Y`.
+ Run random forest with :math:`m` trees. Independent bootstrap samples of size
:math:`n_{b} \leq n` are drawn to build a tree each time; each tree structure
within the forest is denoted as :math:`\phi_w \in \mathbf{P}`,
:math:`w \in \{ 1, \ldots, m \}`; :math:`\phi_w(x_i)` denotes the partition
assigned to :math:`x_i`.
+ Calculate the proximity kernel:
.. math::
\mathbf{K}^{\mathbf{x}}_{ij} = \frac{1}{m} \sum_{w = 1}^{m} I(\phi_w(x_i)
= \phi_w(x_j))
    where :math:`I(\cdot)` is the indicator function for how often two observations
lie in the same partition.
+ Compute the induced kernel correlation: Let
.. math::
\mathbf{L}^{\mathbf{x}}_{ij}=
\begin{cases}
\mathbf{K}^{\mathbf{x}}_{ij}
- \frac{1}{n-2} \sum_{t=1}^{n} \mathbf{K}^{\mathbf{x}}_{it}
- \frac{1}{n-2} \sum_{s=1}^{n} \mathbf{K}^{\mathbf{x}}_{sj}
+ \frac{1}{(n-1)(n-2)} \sum_{s,t=1}^{n} \mathbf{K}^{\mathbf{x}}_{st}
& \mbox{when} \ i \neq j \\
0 & \mbox{ otherwise}
\end{cases}
+ Then let :math:`\mathbf{K}^{\mathbf{y}}` be the Euclidean distance induced kernel,
and similarly compute :math:`\mathbf{L}^{\mathbf{y}}` from
:math:`\mathbf{K}^{\mathbf{y}}`. The unbiased kernel correlation equals
.. math::
\mathrm{KMERF}_n(\mathbf{x}, \mathbf{y}) = \frac{1}{n(n-3)}
\mathrm{tr} \left( \mathbf{L}^{\mathbf{x}} \mathbf{L}^{\mathbf{y}} \right)
    The p-value returned is calculated using a permutation test via
:meth:`hyppo.tools.perm_test`.
References
----------
.. footbibliography::
"""
def __init__(self, forest="regressor", ntrees=500, **kwargs):
if forest in FOREST_TYPES.keys():
self.clf = FOREST_TYPES[forest](n_estimators=ntrees, **kwargs)
else:
raise ValueError("Forest must be of type classification or regression")
IndependenceTest.__init__(self)
def statistic(self, x, y):
r"""
Helper function that calculates the KMERF test statistic.
Parameters
----------
x,y : ndarray
Input data matrices. ``x`` and ``y`` must have the same number of
samples. That is, the shapes must be ``(n, p)`` and ``(n, 1)`` where
`n` is the number of samples and `p` is the number of
dimensions.
Returns
-------
stat : float
The computed KMERF statistic.
"""
y = y.reshape(-1)
self.clf.fit(x, y)
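# sim_matrix() builds the forest-induced proximity kernel: the fraction of trees
# in which two observations fall in the same leaf partition. sqrt(1 - K) converts
# that similarity into a distance matrix for the unbiased dcorr statistic below.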
distx = np.sqrt(1 - sim_matrix(self.clf, x))
y = y.reshape(-1, 1)
disty = pairwise_distances(y, metric="euclidean")
stat = _dcorr(distx, disty, bias=False, is_fast=False)
self.stat = stat
# get normalized feature importances
importances = self.clf.feature_importances_
importances -= np.min(importances)
self.importances = importances / np.max(importances)
return stat
def test(self, x, y, reps=1000, workers=1, random_state=None):
r"""
Calculates the KMERF test statistic and p-value.
Parameters
----------
x,y : ndarray
Input data matrices. ``x`` and ``y`` must have the same number of
samples. That is, the shapes must be ``(n, p)`` and ``(n, 1)`` where
`n` is the number of samples and `p` is the number of
dimensions.
reps : int, default: 1000
The number of replications used to estimate the null distribution
when using the permutation test used to calculate the p-value.
workers : int, default: 1
The number of cores to parallelize the p-value computation over.
Supply ``-1`` to use all cores available to the Process.
Returns
-------
stat : float
The computed KMERF statistic.
pvalue : float
The computed KMERF p-value.
kmerf_dict : dict
Contains additional useful outputs with the following keys:
- feat_importance : ndarray
An array containing the importance of each dimension
Examples
--------
>>> import numpy as np
>>> from hyppo.independence import KMERF
>>> x = np.arange(100)
>>> y = x
>>> '%.1f, %.2f' % KMERF().test(x, y)[:2] # doctest: +SKIP
'1.0, 0.001'
"""
check_input = _CheckInputs(x, y, reps=reps)
x, y = check_input()
stat, pvalue = super(KMERF, self).test(
x, y, reps, workers, is_distsim=False, random_state=random_state
)
kmerf_dict = {"feat_importance": self.importances}
return KMERFTestOutput(stat, pvalue, kmerf_dict)
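# A minimal usage sketch, illustrative only: the synthetic data, seed, ntrees,
# and reps values below are arbitrary example choices rather than package defaults.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x_demo = rng.normal(size=(100, 3))                                    # (n, p) features
    y_demo = (x_demo[:, 0] + 0.1 * rng.normal(size=100)).reshape(-1, 1)   # (n, 1) response

    # Full test: statistic, permutation p-value, and normalized feature importances.
    stat, pvalue, info = KMERF(forest="regressor", ntrees=100).test(
        x_demo, y_demo, reps=100, random_state=0
    )
    print(f"stat={stat:.3f}, pvalue={pvalue:.3f}")
    print("feature importances:", info["feat_importance"])

    # The proximity kernel from the Notes section, computed directly from a fitted
    # forest: K[i, j] is the fraction of trees in which samples i and j share a leaf.
    clf = RandomForestRegressor(n_estimators=100, random_state=0).fit(x_demo, y_demo.ravel())
    leaves = clf.apply(x_demo)                                            # (n, n_trees) leaf ids
    K = (leaves[:, None, :] == leaves[None, :, :]).mean(axis=2)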
| 35.12766 | 88 | 0.602059 |
ba1f5244d61e9c47da19cb867237941e08bc843a | 141 | py | Python | web/Viewer/urls.py | carstenfuchs/hallcam | 4ef0d805d1b28918aa6ab0a078b1e1668c0aac27 | [
"MIT"
] | null | null | null | web/Viewer/urls.py | carstenfuchs/hallcam | 4ef0d805d1b28918aa6ab0a078b1e1668c0aac27 | [
"MIT"
] | null | null | null | web/Viewer/urls.py | carstenfuchs/hallcam | 4ef0d805d1b28918aa6ab0a078b1e1668c0aac27 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import welcome
app_name = 'viewer'
urlpatterns = [
    path('', welcome.view, name='welcome'),
]
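# Illustrative note: because app_name = 'viewer' namespaces these routes, the
# named URL above is reversed as 'viewer:welcome', e.g.
#
#   from django.urls import reverse
#   reverse('viewer:welcome')  # resolves to '/' under wherever this URLconf is included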
| 14.1 | 43 | 0.687943 |
a91d591251f0d2cf4b6860812a53dcda668a72d9 | 7,375 | py | Python | src/main/python/programmingtheiot/cda/system/SensorAdapterManager.py | NULishengZhang/piot-python-components | 006674bc42443bb2a843bfd7dfa5b55be9843961 | [
"MIT"
] | null | null | null | src/main/python/programmingtheiot/cda/system/SensorAdapterManager.py | NULishengZhang/piot-python-components | 006674bc42443bb2a843bfd7dfa5b55be9843961 | [
"MIT"
] | null | null | null | src/main/python/programmingtheiot/cda/system/SensorAdapterManager.py | NULishengZhang/piot-python-components | 006674bc42443bb2a843bfd7dfa5b55be9843961 | [
"MIT"
] | null | null | null | #####
#
# This class is part of the Programming the Internet of Things project.
#
# It is provided as a simple shell to guide the student and assist with
# implementation for the Programming the Internet of Things exercises,
# and designed to be modified by the student as needed.
#
import logging
from importlib import import_module
from apscheduler.schedulers.background import BackgroundScheduler
import programmingtheiot.common.ConfigConst as ConfigConst
from programmingtheiot.common.ConfigUtil import ConfigUtil
from programmingtheiot.common.IDataMessageListener import IDataMessageListener
from programmingtheiot.cda.sim.SensorDataGenerator import SensorDataGenerator
from programmingtheiot.cda.sim.HumiditySensorSimTask import HumiditySensorSimTask
from programmingtheiot.cda.sim.TemperatureSensorSimTask import TemperatureSensorSimTask
from programmingtheiot.cda.sim.PressureSensorSimTask import PressureSensorSimTask
class SensorAdapterManager(object):
"""
Manages the simulated / emulated sensor tasks: polls each adapter on a fixed
schedule and passes the generated SensorData to the registered IDataMessageListener.
"""
# SensorAdapterManager Constructor
def __init__(self, useEmulator=False):
configUtil = ConfigUtil()
# set default pollRate
self.pollRate = configUtil.getInteger( section=ConfigConst.CONSTRAINED_DEVICE, key=ConfigConst.POLL_CYCLES_KEY,defaultVal=ConfigConst.DEFAULT_POLL_CYCLES)
# set default useEmulator
if not useEmulator:
self.useEmulator = configUtil.getBoolean(section=ConfigConst.CONSTRAINED_DEVICE,
key=ConfigConst.ENABLE_EMULATOR_KEY)
else:
self.useEmulator = useEmulator
# set default locationID
self.locationID = configUtil.getProperty( section=ConfigConst.CONSTRAINED_DEVICE, key=ConfigConst.DEVICE_LOCATION_ID_KEY, defaultVal=ConfigConst.NOT_SET)
# check if pollRate is valid
if self.pollRate <= 0:
self.pollRate = ConfigConst.DEFAULT_POLL_CYCLES
# set scheduler job
self.scheduler = BackgroundScheduler()
self.scheduler.add_job( self.handleTelemetry, 'interval', seconds=self.pollRate)
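# Note: the interval job is only registered here; handleTelemetry() begins firing
# every pollRate seconds on a background thread once startManager() calls
# scheduler.start().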
self.dataMsgListener = None
if self.useEmulator:
logging.info("Emulators will be used!")
# load the temperature sensor emulator (you can use either `import_module()` as shown, or `__import__()`)
tempModule = import_module('programmingtheiot.cda.emulated.TemperatureSensorEmulatorTask',
'TemperatureSensorEmulatorTask')
teClazz = getattr(tempModule, 'TemperatureSensorEmulatorTask')
self.tempAdapter = teClazz()
# load the pressure sensor emulator (you can use either `import_module()` as shown, or `__import__()`)
pressureModule = import_module('programmingtheiot.cda.emulated.PressureSensorEmulatorTask',
'PressureSensorEmulatorTask')
prClazz = getattr(pressureModule, 'PressureSensorEmulatorTask')
self.pressureAdapter = prClazz()
# load the humidity sensor emulator (you can use either `import_module()` as shown, or `__import__()`)
humidityModule = import_module('programmingtheiot.cda.emulated.HumiditySensorEmulatorTask',
'HumiditySensorEmulatorTask')
hmClazz = getattr(humidityModule, 'HumiditySensorEmulatorTask')
self.humidityAdapter = hmClazz()
else:
self.dataGenerator = SensorDataGenerator()
# set temperature data
tempFloor = configUtil.getFloat( section=ConfigConst.CONSTRAINED_DEVICE, key=ConfigConst.TEMP_SIM_FLOOR_KEY, defaultVal= SensorDataGenerator.LOW_NORMAL_INDOOR_TEMP)
tempCeiling = configUtil.getFloat(
section=ConfigConst.CONSTRAINED_DEVICE,
key=ConfigConst.TEMP_SIM_CEILING_KEY,
defaultVal= SensorDataGenerator.HI_NORMAL_INDOOR_TEMP)
tempData = self.dataGenerator.generateDailyIndoorTemperatureDataSet(
minValue=tempFloor,
maxValue=tempCeiling,
useSeconds=False)
self.tempAdapter = TemperatureSensorSimTask(dataSet=tempData)
# set pressure data
pressureFloor =configUtil.getFloat(
section=ConfigConst.CONSTRAINED_DEVICE,
key=ConfigConst.PRESSURE_SIM_FLOOR_KEY,
defaultVal=SensorDataGenerator.LOW_NORMAL_ENV_PRESSURE)
pressureCeiling = configUtil.getFloat(
section=ConfigConst.CONSTRAINED_DEVICE,
key=ConfigConst.PRESSURE_SIM_CEILING_KEY,
defaultVal= SensorDataGenerator.HI_NORMAL_ENV_PRESSURE)
pressureData = self.dataGenerator.generateDailyEnvironmentPressureDataSet(
minValue=pressureFloor,
maxValue=pressureCeiling,
useSeconds=False)
self.pressureAdapter = PressureSensorSimTask(dataSet=pressureData)
# set humidity data
humidityFloor = configUtil.getFloat(
section=ConfigConst.CONSTRAINED_DEVICE,
key=ConfigConst.HUMIDITY_SIM_FLOOR_KEY,
defaultVal= SensorDataGenerator.LOW_NORMAL_ENV_HUMIDITY)
humidityCeiling = configUtil.getFloat(
section=ConfigConst.CONSTRAINED_DEVICE,
key=ConfigConst.HUMIDITY_SIM_CEILING_KEY,
defaultVal=SensorDataGenerator.HI_NORMAL_ENV_HUMIDITY)
humidityData = self.dataGenerator.generateDailyEnvironmentHumidityDataSet(minValue=humidityFloor,maxValue=humidityCeiling,useSeconds=False)
self.humidityAdapter = HumiditySensorSimTask(dataSet=humidityData)
# handle telemetry data
def handleTelemetry(self):
humidityData = self.humidityAdapter.generateTelemetry()
pressureData = self.pressureAdapter.generateTelemetry()
tempData = self.tempAdapter.generateTelemetry()
humidityData.setLocationID(self.locationID)
pressureData.setLocationID(self.locationID)
tempData.setLocationID(self.locationID)
logging.info('Generated humidity data: ' + str(humidityData))
logging.info('Generated pressure data: ' + str(pressureData))
logging.info('Generated temp data: ' + str(tempData))
if self.dataMsgListener:
self.dataMsgListener.handleSensorMessage(humidityData)
self.dataMsgListener.handleSensorMessage(pressureData)
self.dataMsgListener.handleSensorMessage(tempData)
# set listener
def setDataMessageListener(self, listener: IDataMessageListener) -> bool:
if listener:
self.dataMsgListener = listener
# start manager
def startManager(self):
logging.info('Started SensorAdapterManager.')
if not self.scheduler.running:
self.scheduler.start()
else:
logging.warning( 'SensorAdapterManager scheduler already started.')
# stop manager
def stopManager(self):
logging.info('Stopped SensorAdapterManager.')
try:
self.scheduler.shutdown()
except:
logging.warning( 'SensorAdapterManager scheduler already stopped.')
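# A minimal usage sketch, illustrative only. PrintingListener is a hypothetical
# stand-in that duck-types just the handleSensorMessage() callback this manager
# invokes; a real IDataMessageListener implementation from the project would
# normally be registered instead. Assumes the CDA configuration file is readable
# so ConfigUtil can supply pollRate and the emulator/simulator settings.
if __name__ == "__main__":
    import time

    class PrintingListener:
        # Duck-typed stand-in: only the callback SensorAdapterManager uses is provided.
        def handleSensorMessage(self, data) -> bool:
            logging.info("Received sensor data: %s", str(data))
            return True

    logging.basicConfig(level=logging.INFO)
    mgr = SensorAdapterManager()
    mgr.setDataMessageListener(PrintingListener())
    mgr.startManager()
    time.sleep(15)  # let a few poll cycles fire (pollRate comes from the configuration)
    mgr.stopManager()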
| 42.877907 | 176 | 0.687458 |