hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction | original_content | filtered:remove_function_no_docstring | filtered:remove_class_no_docstring | filtered:remove_delete_markers
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d607dd9d30fd2b66bf03406a4293eb03eebd09f | 548 | py | Python | june-challenge/min-cost-stairs.py | kd82/interview-preparation | a47de06c677b37c8160d1429f43e38288c157754 | [
"MIT"
] | null | null | null | june-challenge/min-cost-stairs.py | kd82/interview-preparation | a47de06c677b37c8160d1429f43e38288c157754 | [
"MIT"
] | null | null | null | june-challenge/min-cost-stairs.py | kd82/interview-preparation | a47de06c677b37c8160d1429f43e38288c157754 | [
"MIT"
] | null | null | null | from typing import List | 26.095238 | 63 | 0.481752 | from typing import List
def minCostClimbingStairs(cost: List[int]) -> int:
    def dfs(cost, i):
        if i >= len(cost):
            return 0
        if dp[i] > -1:
            return dp[i]
        res = cost[i] + min(dfs(cost, i + 1), dfs(cost, i + 2))
        dp[i] = res
        return res
    dp = [-1]*(len(cost))
    return min(dfs(cost, 0), dfs(cost, 1))
def dfsbu(cost):
    d1, d2 = 0, 0
    for i in range(2, len(cost) + 1):
        temp = d1
        d1 = min(cost[i - 1] + d1, cost[i - 2] + d2)
        d2 = temp
    return d1 | 474 | 0 | 46
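A quick sanity check for the two implementations above (a sketch; it assumes the functions are importable as-is and follows the LeetCode 746 convention that you may start from step 0 or step 1):

```python
# Top-down (memoized) and bottom-up versions should agree.
print(minCostClimbingStairs([10, 15, 20]))                          # 15
print(minCostClimbingStairs([1, 100, 1, 1, 1, 100, 1, 1, 100, 1]))  # 6
print(dfsbu([10, 15, 20]))                                          # 15
```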
4b4e3ddd829c54567bd78d89e6a6a0b6affd419f | 1,010 | py | Python | portlab/evaluate/lib/backtest.py | sqt-aliu/portlab | 366755c2cfe7bb53c1b236688684fc2c9d8bf4d1 | [
"MIT"
] | 1 | 2017-11-06T06:01:24.000Z | 2017-11-06T06:01:24.000Z | portlab/evaluate/lib/backtest.py | sqt-aliu/portlab | 366755c2cfe7bb53c1b236688684fc2c9d8bf4d1 | [
"MIT"
] | null | null | null | portlab/evaluate/lib/backtest.py | sqt-aliu/portlab | 366755c2cfe7bb53c1b236688684fc2c9d8bf4d1 | [
"MIT"
] | null | null | null | import numpy as np
# core functions
totalreturn = lambda x: (x[-1]/x[0])-1
finalreturn = lambda x: x[-1]
sharpe = lambda x: np.sqrt(12) * (np.mean(x) / np.std(x))
ann_vol = lambda x: np.sqrt(12) * np.std(x)
cagr = lambda x: ((((x[-1]) / x[0])) ** (12.0/(x.count()-1))) - 1
cagr2 = lambda x: ((np.mean(x)+1) ** 12) -1
| 21.956522 | 65 | 0.447525 | import numpy as np
# core functions
totalreturn = lambda x: (x[-1]/x[0])-1
finalreturn = lambda x: x[-1]
sharpe = lambda x: np.sqrt(12) * (np.mean(x) / np.std(x))
ann_vol = lambda x: np.sqrt(12) * np.std(x)
cagr = lambda x: ((((x[-1]) / x[0])) ** (12.0/(x.count()-1))) - 1
cagr2 = lambda x: ((np.mean(x)+1) ** 12) -1
def max_drawdown(X):
    mdd = 0
    peak = X[0]
    for x in X:
        if x > peak:
            peak = x
        dd = (peak - x)
        if dd > mdd:
            mdd = dd
    return mdd
def avg_max_drawdown(X):
    ldd = []
    mdd = 0
    peak = X[0]
    for x in X:
        if x > peak:
            peak = x
        dd = (peak - x)
        if dd > mdd:
            mdd = dd
        ldd.append(mdd)
    return 0 if len(ldd) == 0 else np.mean(ldd)
def bca_max_drawdown(X):
    mdd = 0
    peak = X[0]
    for x in X:
        if x > peak:
            peak = x
        if peak != 0:
            dd = 1-x/peak
            if dd > mdd:
                mdd = dd
    return mdd
| 607 | 0 | 81 |
18efce23cede35e1ce11c9f84824727b1df23f9d | 2,044 | py | Python | sample_scripts/retarget_campaign.py | decisive/api-demo-python | 58cd14e9e1f6373a3cd927536fd29f5f286940a0 | [
"MIT"
] | null | null | null | sample_scripts/retarget_campaign.py | decisive/api-demo-python | 58cd14e9e1f6373a3cd927536fd29f5f286940a0 | [
"MIT"
] | null | null | null | sample_scripts/retarget_campaign.py | decisive/api-demo-python | 58cd14e9e1f6373a3cd927536fd29f5f286940a0 | [
"MIT"
] | null | null | null | import sys # NOTE: for exiting
import requests
import datetime
import pprint
import ujson as json # NOTE: faster json
API_KEY = '' # TODO: input api key here!!!
if not API_KEY:
    sys.exit('Please insert your Decisive API key')
print
print 'Creating session to always add API key...'
# NOTE: you can also use decisive.DecisiveApiClient
session = requests.Session()
session.auth = (API_KEY,'')
API_HOST = 'https://ads.decisive.is'
print
print 'Selecting ads...'
ads = get('ads', offset=1, limit=5, approved='true')
ad_ids = [a['ad_id'] for a in ads]
print 'selected', ad_ids
print
print 'Creating retargeting campaign...'
to_retargeting_id = lambda a: 'clicks_{}'.format(a)  # 'a' is an ad id taken from ad_ids
# TODO: fill in your own campaign details
new_ad = {'url':'http://google.com',
          'name':'example ad name',
          'budget':1984,
          'bidmode':'Manual',
          'cpm_bid':3.1415,
          'creative_urls':['https://www.google.com.au/images/srpr/logo11w.png'],
          'start_date':datetime.datetime.now().isoformat(),
          'end_date':datetime.datetime.now().isoformat(),
          'blacklist':{'country':['Canada','France'],'site': ['tmz.com', 'dogecoin.com']}
          }
new_ad['targeting'] = {'device_groups':map(to_retargeting_id, ad_ids)} # NOTE: retargeting
print post(new_ad, 'ads')
| 30.507463 | 95 | 0.65362 | import sys # NOTE: for exiting
import requests
import datetime
import pprint
import ujson as json # NOTE: faster json
API_KEY = '' # TODO: input api key here!!!
if not API_KEY:
    sys.exit('Please insert your Decisive API key')
print
print 'Creating session to always add API key...'
# NOTE: you can also use decisive.DecisiveApiClient
session = requests.Session()
session.auth = (API_KEY,'')
API_HOST = 'https://ads.decisive.is'
def to_uri(*paths, **get_args):
    path = '/'.join(p.strip('/') if isinstance(p,(str,unicode)) else unicode(p) for p in paths)
    args = '&'.join('{}={}'.format(*i) for i in get_args.items())
    return '{}/{}?{}'.format(API_HOST, path, args)
def get(*paths, **get_args):
    uri = to_uri(*paths, **get_args)
    response = session.get(uri)
    response.raise_for_status()
    return response.json()
def put(updated_ad):
    uri = to_uri('ads',updated_ad['ad_id'])
    response = session.put(uri, data=json.dumps(updated_ad))
    response.raise_for_status()
    return True
def post(data, *paths):
    uri = to_uri(*paths)
    response = session.post(uri, data=json.dumps(data))
    response.raise_for_status()
    return response.json()
print
print 'Selecting ads...'
ads = get('ads', offset=1, limit=5, approved='true')
ad_ids = [a['ad_id'] for a in ads]
print 'selected', ad_ids
print
print 'Creating retargeting campaign...'
to_retargeting_id = lambda a: 'clicks_{}'.format(a)  # 'a' is an ad id taken from ad_ids
# TODO: fill in your own campaign details
new_ad = {'url':'http://google.com',
          'name':'example ad name',
          'budget':1984,
          'bidmode':'Manual',
          'cpm_bid':3.1415,
          'creative_urls':['https://www.google.com.au/images/srpr/logo11w.png'],
          'start_date':datetime.datetime.now().isoformat(),
          'end_date':datetime.datetime.now().isoformat(),
          'blacklist':{'country':['Canada','France'],'site': ['tmz.com', 'dogecoin.com']}
          }
new_ad['targeting'] = {'device_groups':map(to_retargeting_id, ad_ids)} # NOTE: retargeting
print post(new_ad, 'ads')
| 652 | 0 | 91 |
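For reference, a sketch of what the `to_uri` helper above builds (the ad id is hypothetical; the script is Python 2, hence the bare `print` statements):

```python
# Path segments are joined with '/' and keyword arguments become the query string:
print to_uri('ads', 123)                # https://ads.decisive.is/ads/123?
print to_uri('ads', offset=1, limit=5)  # https://ads.decisive.is/ads?offset=1&limit=5
# (query-argument order follows dict iteration order)
```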
9975b4944c84ea643a71a8fe95b55e6ebcf03607 | 342 | py | Python | taaontia/commands/registry.py | Kagaminara/taaontia.py | 5f31f22fd26767903f8351eab209848fe1ddb6b0 | [
"MIT"
] | null | null | null | taaontia/commands/registry.py | Kagaminara/taaontia.py | 5f31f22fd26767903f8351eab209848fe1ddb6b0 | [
"MIT"
] | null | null | null | taaontia/commands/registry.py | Kagaminara/taaontia.py | 5f31f22fd26767903f8351eab209848fe1ddb6b0 | [
"MIT"
] | null | null | null |
commands = CommandsRegistry()
| 21.375 | 56 | 0.652047 | class CommandsRegistry:
    def __init__(self):
        self.commands = {}
    def register(self, command):
        self.commands[command.trigger] = command()
        return command
    def get(self, command):
        from .helper import HelpCommand
        return self.commands.get(command, HelpCommand())
commands = CommandsRegistry()
| 206 | 2 | 102 |
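`register` is written to work as a class decorator: it instantiates the command, stores the instance under its `trigger`, and returns the class unchanged. A sketch of the intended usage (the `PingCommand` class is hypothetical):

```python
@commands.register
class PingCommand:
    trigger = 'ping'

    def run(self):
        return 'pong'

handler = commands.get('ping')   # the registered PingCommand instance
fallback = commands.get('nope')  # unknown triggers fall back to HelpCommand()
```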
de76335bf95488a9cd521c5b94630a6f3bfab345 | 24,150 | py | Python | src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_nn_ops.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_nn_ops.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_nn_ops.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | from typing import Tuple, List
import numpy as np
import tensorflow as tf
from webdnn.frontend.tensorflow.converter import TensorFlowConverter
from webdnn.frontend.tensorflow.util import unary_op_handler, check_data_format, convert_odd_padding_to_concat, parse_padding
from webdnn.graph.axis import Axis
from webdnn.graph.operators.average_pooling_2d import AveragePooling2D
from webdnn.graph.operators.clipped_relu import ClippedRelu
from webdnn.graph.operators.concat import Concat
from webdnn.graph.operators.convolution2d import Convolution2D
from webdnn.graph.operators.deconvolution2d import Deconvolution2D
from webdnn.graph.operators.elu import Elu
from webdnn.graph.operators.max_pooling_2d import MaxPooling2D
from webdnn.graph.operators.relu import Relu
from webdnn.graph.operators.softmax import Softmax
from webdnn.graph.operators.softplus import Softplus
from webdnn.graph.operators.softsign import Softsign
from webdnn.graph.order import Order
from webdnn.graph.variables.constant_variable import ConstantVariable
from webdnn.util import console
@TensorFlowConverter.register_handler("AvgPool")
@TensorFlowConverter.register_handler("AvgPool3D")
@TensorFlowConverter.register_handler("AvgPool3DGrad")
@TensorFlowConverter.register_handler("AvgPoolGrad")
@TensorFlowConverter.register_handler("BatchNormWithGlobalNormalization")
@TensorFlowConverter.register_handler("BatchNormWithGlobalNormalizationGrad")
@TensorFlowConverter.register_handler("BiasAdd")
@TensorFlowConverter.register_handler("BiasAddGrad")
@TensorFlowConverter.register_handler("BiasAddV1")
@TensorFlowConverter.register_handler("Conv2D")
@TensorFlowConverter.register_handler("Conv2DBackpropFilter")
@TensorFlowConverter.register_handler("Conv2DBackpropInput")
@TensorFlowConverter.register_handler("Conv3D")
@TensorFlowConverter.register_handler("Conv3DBackpropFilter")
@TensorFlowConverter.register_handler("Conv3DBackpropFilterV2")
@TensorFlowConverter.register_handler("Conv3DBackpropInput")
@TensorFlowConverter.register_handler("Conv3DBackpropInputV2")
@TensorFlowConverter.register_handler("DepthwiseConv2dNative")
@TensorFlowConverter.register_handler("DepthwiseConv2dNativeBackpropFilter")
@TensorFlowConverter.register_handler("DepthwiseConv2dNativeBackpropInput")
@TensorFlowConverter.register_handler("Dilation2D")
@TensorFlowConverter.register_handler("Dilation2DBackpropFilter")
@TensorFlowConverter.register_handler("Dilation2DBackpropInput")
TensorFlowConverter.register_handler("Elu")(unary_op_handler(Elu))
@TensorFlowConverter.register_handler("EluGrad")
@TensorFlowConverter.register_handler("FractionalAvgPoolGrad")
@TensorFlowConverter.register_handler("FractionalMaxPoolGrad")
@TensorFlowConverter.register_handler("FusedBatchNorm")
@TensorFlowConverter.register_handler("FusedPadConv2D")
@TensorFlowConverter.register_handler("FusedResizeAndPadConv2D")
@TensorFlowConverter.register_handler("InTopK")
@TensorFlowConverter.register_handler("InTopKV2")
@TensorFlowConverter.register_handler("L2Loss")
@TensorFlowConverter.register_handler("LRN")
@TensorFlowConverter.register_handler("LRNGrad")
@TensorFlowConverter.register_handler("LogSoftmax")
@TensorFlowConverter.register_handler("MaxPool")
@TensorFlowConverter.register_handler("MaxPool3D")
@TensorFlowConverter.register_handler("MaxPool3DGrad")
@TensorFlowConverter.register_handler("MaxPool3DGradGrad")
@TensorFlowConverter.register_handler("MaxPoolGrad")
@TensorFlowConverter.register_handler("MaxPoolGradGrad")
@TensorFlowConverter.register_handler("MaxPoolGradGradWithArgmax")
@TensorFlowConverter.register_handler("MaxPoolGradWithArgmax")
@TensorFlowConverter.register_handler("MaxPoolWithArgmax")
@TensorFlowConverter.register_handler("QuantizedAvgPool")
@TensorFlowConverter.register_handler("QuantizedBatchNormWithGlobalNormalization")
@TensorFlowConverter.register_handler("QuantizedBiasAdd")
@TensorFlowConverter.register_handler("QuantizedConv2D")
@TensorFlowConverter.register_handler("QuantizedMaxPool")
@TensorFlowConverter.register_handler("QuantizedRelu")
@TensorFlowConverter.register_handler("QuantizedRelu6")
@TensorFlowConverter.register_handler("QuantizedReluX")
TensorFlowConverter.register_handler("Relu")(unary_op_handler(Relu))
@TensorFlowConverter.register_handler("Relu6")
@TensorFlowConverter.register_handler("Relu6Grad")
@TensorFlowConverter.register_handler("ReluGrad")
@TensorFlowConverter.register_handler("Softmax")
@TensorFlowConverter.register_handler("SoftmaxCrossEntropyWithLogits")
@TensorFlowConverter.register_handler("Softplus")
@TensorFlowConverter.register_handler("SoftplusGrad")
TensorFlowConverter.register_handler("Softsign")(unary_op_handler(Softsign))
@TensorFlowConverter.register_handler("SoftsignGrad")
@TensorFlowConverter.register_handler("SparseSoftmaxCrossEntropyWithLogits")
@TensorFlowConverter.register_handler("TopK")
@TensorFlowConverter.register_handler("TopKV2")
| 42.592593 | 135 | 0.755735 | from typing import Tuple, List
import numpy as np
import tensorflow as tf
from webdnn.frontend.tensorflow.converter import TensorFlowConverter
from webdnn.frontend.tensorflow.util import unary_op_handler, check_data_format, convert_odd_padding_to_concat, parse_padding
from webdnn.graph.axis import Axis
from webdnn.graph.operators.average_pooling_2d import AveragePooling2D
from webdnn.graph.operators.clipped_relu import ClippedRelu
from webdnn.graph.operators.concat import Concat
from webdnn.graph.operators.convolution2d import Convolution2D
from webdnn.graph.operators.deconvolution2d import Deconvolution2D
from webdnn.graph.operators.elu import Elu
from webdnn.graph.operators.max_pooling_2d import MaxPooling2D
from webdnn.graph.operators.relu import Relu
from webdnn.graph.operators.softmax import Softmax
from webdnn.graph.operators.softplus import Softplus
from webdnn.graph.operators.softsign import Softsign
from webdnn.graph.order import Order
from webdnn.graph.variables.constant_variable import ConstantVariable
from webdnn.util import console
@TensorFlowConverter.register_handler("AvgPool")
def avg_pool_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    data_format = tf_op.get_attr("data_format")
    check_data_format(x, data_format)
    ksize = tuple(tf_op.get_attr("ksize")) # type: Tuple[int,...]
    assert ksize[x.order.axes_dict[Axis.N]] == 1
    assert ksize[x.order.axes_dict[Axis.C]] == 1
    ksize = (ksize[x.order.axes_dict[Axis.H]], ksize[x.order.axes_dict[Axis.W]])
    stride = tuple(tf_op.get_attr("strides")) # type: Tuple[int,...]
    assert stride[x.order.axes_dict[Axis.N]] == 1
    assert stride[x.order.axes_dict[Axis.C]] == 1
    stride = (stride[x.order.axes_dict[Axis.H]], stride[x.order.axes_dict[Axis.W]])
    padding = (
        parse_padding(tf_op.get_attr("padding"), ksize[0], 1),
        parse_padding(tf_op.get_attr("padding"), ksize[1], 1),
    )
    x, padding = convert_odd_padding_to_concat(x, padding=padding)
    if any(p > 0 for p in padding):
        console.warning(
            "[KerasConverter] keras.layers.AveragePooling computes average by dividing number of valid elements in window "
            "(without padding element), but WebDNN divides it by the number of elements including padding element, so different "
            "result will be generated on the edge.")
    y, = AveragePooling2D(None, ksize=ksize, stride=stride, padding=padding, cover_all=False)(x)
    converter.set_variable(tf_op.outputs[0], y)
@TensorFlowConverter.register_handler("AvgPool3D")
def avg_pool3_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("AvgPool3DGrad")
def avg_pool3_d_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("AvgPoolGrad")
def avg_pool_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchNormWithGlobalNormalization")
def batch_norm_with_global_normalization_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BatchNormWithGlobalNormalizationGrad")
def batch_norm_with_global_normalization_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BiasAdd")
def bias_add_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    b = converter.get_variable(tf_op.inputs[1])
    data_format = tf_op.get_attr("data_format")
    if data_format == b"NCHW":
        b.order.axes[0].unify(x.order.axes[1])
    elif data_format == b"NHWC":
        b.order.axes[0].unify(x.order.axes[-1])
    else:
        raise NotImplementedError("Unknown data format")
    y = x + b
    converter.set_variable(tf_op.outputs[0], y)
@TensorFlowConverter.register_handler("BiasAddGrad")
def bias_add_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BiasAddV1")
def bias_add_v1_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Conv2D")
def conv2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    data_format = tf_op.get_attr("data_format")
    check_data_format(x, data_format)
    w = converter.get_variable(tf_op.inputs[1]) # HWCN
    w.order.unify(Order([Axis.KH, Axis.KW, Axis.C, Axis.N]))
    ksize = (w.shape_dict[Axis.KH], w.shape_dict[Axis.KW])
    stride = tuple(tf_op.get_attr("strides")) # type: Tuple[int,...]
    assert stride[x.order.axes_dict[Axis.N]] == 1
    assert stride[x.order.axes_dict[Axis.C]] == 1
    stride = (stride[x.order.axes_dict[Axis.H]], stride[x.order.axes_dict[Axis.W]])
    input_size = np.array([x.shape_dict[Axis.H], x.shape_dict[Axis.W]])
    padding = np.array([
        parse_padding(tf_op.get_attr("padding"), ksize[0], 1),
        parse_padding(tf_op.get_attr("padding"), ksize[1], 1)
    ])
    apron_size = (input_size + padding.sum(axis=1) - ksize) % stride
    # cancel padding by apron if possible
    for i in (0, 1):
        if padding[i, 0] > apron_size[i]:
            padding[i, 0] -= apron_size[i]
            apron_size[i] = 0
        else:
            apron_size[i] -= padding[i, 0]
            padding[i, 0] = 0
        if padding[i, 1] > apron_size[i]:
            padding[i, 1] -= apron_size[i]
            apron_size[i] = 0
        else:
            apron_size[i] -= padding[i, 1]
            padding[i, 1] = 0
    padding_list = padding.tolist() # type: List[Tuple[int, int]]
    x, padding = convert_odd_padding_to_concat(x, padding=padding_list)
    y, = Convolution2D(None, ksize=ksize, stride=stride, padding=padding)(x, w)
    converter.set_variable(tf_op.outputs[0], y)
@TensorFlowConverter.register_handler("Conv2DBackpropFilter")
def conv2_d_backprop_filter_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Conv2DBackpropInput")
def conv2_d_backprop_input_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    input_sizes = converter.get_variable(tf_op.inputs[0])
    if not isinstance(input_sizes, ConstantVariable):
        raise NotImplementedError(
            "[TensorFlowConverter] Conv2DBackpropInput with dynamic shape of output (input of convolution) variable is not supported.")
    input_sizes = tuple(input_sizes.data.astype(np.int32).tolist())
    w = converter.get_variable(tf_op.inputs[1]) # HWNC
    w.order.unify(Order([Axis.KH, Axis.KW, Axis.N, Axis.C]))
    gy = converter.get_variable(tf_op.inputs[2]) # NHWC
    data_format = tf_op.get_attr("data_format")
    check_data_format(gy, data_format)
    input_size = np.array([input_sizes[gy.order.axes_dict[Axis.H]], input_sizes[gy.order.axes_dict[Axis.W]]])
    ksize = np.array([w.shape_dict[Axis.KH], w.shape_dict[Axis.KW]])
    stride = np.array(tf_op.get_attr("strides"))
    assert stride[gy.order.axes_dict[Axis.N]] == 1
    assert stride[gy.order.axes_dict[Axis.C]] == 1
    stride = stride[[gy.order.axes_dict[Axis.H], gy.order.axes_dict[Axis.W]]]
    padding = np.array([
        parse_padding(tf_op.get_attr("padding"), ksize[0], 1),
        parse_padding(tf_op.get_attr("padding"), ksize[1], 1)
    ])
    x, = Deconvolution2D(None, ksize=ksize.tolist(), stride=stride.tolist(), padding=0)(gy, w)
    # Actual padding size depends on 2 factors
    # 1. padding mode
    # 2. extra apron size (= (input size of convolution) - (size of the tensor expanded by deconvolution))
    expanded_size = np.array([x.shape_dict[Axis.H], x.shape_dict[Axis.W]])
    apron_size = input_size - (expanded_size - padding.sum(axis=1))
    # cancel padding by apron if possible
    for i in (0, 1):
        if padding[i, 0] > apron_size[i]:
            padding[i, 0] -= apron_size[i]
            apron_size[i] = 0
        else:
            apron_size[i] -= padding[i, 0]
            padding[i, 0] = 0
        if padding[i, 1] > apron_size[i]:
            padding[i, 1] -= apron_size[i]
            apron_size[i] = 0
        else:
            apron_size[i] -= padding[i, 1]
            padding[i, 1] = 0
    # append extra apron
    for i, axis in enumerate((Axis.H, Axis.W)):
        if apron_size[i] == 0:
            continue
        data = np.zeros([apron_size[i] if a == axis else x.shape_dict[a] for a in x.order.axes])
        x, = Concat(None, axis=axis)(x, ConstantVariable(data, x.order))
    # crop without padding
    padding_list = padding.tolist() # type: List[List[int]]
    slice_h = slice(None) if padding_list[0] == [0, 0] else slice(padding_list[0][0], -padding_list[0][1])
    slice_w = slice(None) if padding_list[1] == [0, 0] else slice(padding_list[1][0], -padding_list[1][1])
    if data_format == b"NCHW":
        x = x[:, :, slice_h, slice_w]
    elif data_format == b"NHWC":
        x = x[:, slice_h, slice_w, :]
    else:
        raise NotImplementedError(f"Unknown data format: {data_format}")
    converter.set_variable(tf_op.outputs[0], x)
@TensorFlowConverter.register_handler("Conv3D")
def conv3_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Conv3DBackpropFilter")
def conv3_d_backprop_filter_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Conv3DBackpropFilterV2")
def conv3_d_backprop_filter_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Conv3DBackpropInput")
def conv3_d_backprop_input_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Conv3DBackpropInputV2")
def conv3_d_backprop_input_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("DepthwiseConv2dNative")
def depthwise_conv2d_native_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("DepthwiseConv2dNativeBackpropFilter")
def depthwise_conv2d_native_backprop_filter_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("DepthwiseConv2dNativeBackpropInput")
def depthwise_conv2d_native_backprop_input_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Dilation2D")
def dilation2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Dilation2DBackpropFilter")
def dilation2_d_backprop_filter_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Dilation2DBackpropInput")
def dilation2_d_backprop_input_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
TensorFlowConverter.register_handler("Elu")(unary_op_handler(Elu))
@TensorFlowConverter.register_handler("EluGrad")
def elu_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FractionalAvgPoolGrad")
def fractional_avg_pool_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FractionalMaxPoolGrad")
def fractional_max_pool_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FusedBatchNorm")
def fused_batch_norm_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    scale = converter.get_variable(tf_op.inputs[1])
    offset = converter.get_variable(tf_op.inputs[2])
    mean = converter.get_variable(tf_op.inputs[3])
    variance = converter.get_variable(tf_op.inputs[4])
    epsilon = tf_op.get_attr("epsilon")
    data_format = tf_op.get_attr("data_format")
    if data_format == b"NHWC":
        channel_axis = x.order.axes[3]
    elif data_format == b"NCHW":
        channel_axis = x.order.axes[1]
    else:
        raise NotImplementedError("Unknown data format")
    scale.order.axes[0].unify(channel_axis)
    offset.order.axes[0].unify(channel_axis)
    mean.order.axes[0].unify(channel_axis)
    variance.order.axes[0].unify(channel_axis)
    y = (x - mean) / ((variance + epsilon) ** 0.5) * scale + offset
    converter.set_variable(tf_op.outputs[0], y)
@TensorFlowConverter.register_handler("FusedPadConv2D")
def fused_pad_conv2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("FusedResizeAndPadConv2D")
def fused_resize_and_pad_conv2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("InTopK")
def in_top_k_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("InTopKV2")
def in_top_kv2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("L2Loss")
def l2_loss_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("LRN")
def lrn_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("LRNGrad")
def lrn_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("LogSoftmax")
def log_softmax_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MaxPool")
def max_pool_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    data_format = tf_op.get_attr("data_format")
    check_data_format(x, data_format)
    ksize = tuple(tf_op.get_attr("ksize")) # type: Tuple[int,...]
    assert ksize[x.order.axes_dict[Axis.N]] == 1
    assert ksize[x.order.axes_dict[Axis.C]] == 1
    ksize = (ksize[x.order.axes_dict[Axis.H]], ksize[x.order.axes_dict[Axis.W]])
    stride = tuple(tf_op.get_attr("strides")) # type: Tuple[int,...]
    assert stride[x.order.axes_dict[Axis.N]] == 1
    assert stride[x.order.axes_dict[Axis.C]] == 1
    stride = (stride[x.order.axes_dict[Axis.H]], stride[x.order.axes_dict[Axis.W]])
    padding = (
        parse_padding(tf_op.get_attr("padding"), ksize[0], 1),
        parse_padding(tf_op.get_attr("padding"), ksize[1], 1),
    )
    x, padding = convert_odd_padding_to_concat(x, padding=padding, value=-1.0e10)
    y, = MaxPooling2D(None, ksize=ksize, stride=stride, padding=padding, cover_all=False)(x)
    converter.set_variable(tf_op.outputs[0], y)
@TensorFlowConverter.register_handler("MaxPool3D")
def max_pool3_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MaxPool3DGrad")
def max_pool3_d_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MaxPool3DGradGrad")
def max_pool3_d_grad_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MaxPoolGrad")
def max_pool_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MaxPoolGradGrad")
def max_pool_grad_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MaxPoolGradGradWithArgmax")
def max_pool_grad_grad_with_argmax_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MaxPoolGradWithArgmax")
def max_pool_grad_with_argmax_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MaxPoolWithArgmax")
def max_pool_with_argmax_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizedAvgPool")
def quantized_avg_pool_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizedBatchNormWithGlobalNormalization")
def quantized_batch_norm_with_global_normalization_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizedBiasAdd")
def quantized_bias_add_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizedConv2D")
def quantized_conv2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizedMaxPool")
def quantized_max_pool_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizedRelu")
def quantized_relu_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizedRelu6")
def quantized_relu6_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("QuantizedReluX")
def quantized_relu_x_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
TensorFlowConverter.register_handler("Relu")(unary_op_handler(Relu))
@TensorFlowConverter.register_handler("Relu6")
def relu6_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y, = ClippedRelu(None, cap=6)(x)
    converter.set_variable(tf_op.outputs[0], y)
@TensorFlowConverter.register_handler("Relu6Grad")
def relu6_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ReluGrad")
def relu_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Softmax")
def softmax_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y, = Softmax(None, axis=x.order.axes[-1])(x)
    converter.set_variable(tf_op.outputs[0], y)
@TensorFlowConverter.register_handler("SoftmaxCrossEntropyWithLogits")
def softmax_cross_entropy_with_logits_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Softplus")
def softplus_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y, = Softplus(None, beta=1)(x)
    converter.set_variable(tf_op.outputs[0], y)
@TensorFlowConverter.register_handler("SoftplusGrad")
def softplus_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
TensorFlowConverter.register_handler("Softsign")(unary_op_handler(Softsign))
@TensorFlowConverter.register_handler("SoftsignGrad")
def softsign_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("SparseSoftmaxCrossEntropyWithLogits")
def sparse_softmax_cross_entropy_with_logits_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("TopK")
def top_k_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("TopKV2")
def top_kv2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
| 17,740 | 0 | 1,386 |
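The `Conv2D` handler above mirrors TensorFlow's `SAME` padding rule and then cancels asymmetric leftovers against the "apron" (input not covered by the sliding window). For reference, the `SAME` rule itself can be written standalone (a sketch, independent of the WebDNN helpers such as `parse_padding`):

```python
import math

def same_padding_1d(input_size, ksize, stride):
    """Per-side padding that TensorFlow's SAME mode applies along one axis."""
    out_size = math.ceil(input_size / stride)
    total = max((out_size - 1) * stride + ksize - input_size, 0)
    # TensorFlow puts the extra pixel of an odd total on the bottom/right side.
    return total // 2, total - total // 2

print(same_padding_1d(28, 3, 2))  # (0, 1): asymmetric, the case convert_odd_padding_to_concat handles
print(same_padding_1d(28, 5, 1))  # (2, 2)
```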
7162d76d7a0f15ece55362aaf305b6e9001bee7f | 673 | py | Python | GF2.py | JediKoder/coursera-CodeMatrix | 1ac461d22ebaf2777eabdcf31d76d709c33f472a | [
"MIT"
] | 6 | 2015-09-18T02:07:21.000Z | 2020-04-22T17:05:11.000Z | GF2.py | JediKoder/coursera-CodeMatrix | 1ac461d22ebaf2777eabdcf31d76d709c33f472a | [
"MIT"
] | null | null | null | GF2.py | JediKoder/coursera-CodeMatrix | 1ac461d22ebaf2777eabdcf31d76d709c33f472a | [
"MIT"
] | 10 | 2015-09-05T03:54:00.000Z | 2020-04-21T12:56:40.000Z | from numbers import Number
one = One()
zero = 0
| 25.884615 | 62 | 0.644874 | from numbers import Number
class One:
    def __add__(self, other): return self if other == 0 else 0
    __sub__ = __add__
    def __mul__(self, other):
        if isinstance(other, Number):
            return 0 if other == 0 else self
        return other
    def __div__(self, other):
        if other == 0: raise ZeroDivisionError
        return self
    __truediv__ = __div__
    def __rdiv__(self,other): return other
    __rtruediv__ = __rdiv__
    __radd__ = __add__
    __rsub__ = __add__
    __rmul__ = __mul__
    def __str__(self): return 'one'
    __repr__ = __str__
    def __neg__(self): return self
    def __bool__(self): return True
one = One()
zero = 0
| 262 | 339 | 23 |
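`One` together with the plain integer `0` behaves like GF(2), the two-element field: addition acts as XOR and multiplication as AND. A few sanity checks (comparisons with `is` rely on object identity, since `One` defines no `__eq__`):

```python
assert one + one == 0    # 1 + 1 = 0 in GF(2)
assert one + 0 is one    # 0 is the additive identity
assert one * one is one  # 1 * 1 = 1
assert one * 0 == 0      # multiplication by 0 absorbs
assert one - one == 0    # subtraction coincides with addition
assert -one is one       # every element is its own negative
assert 0 / one == 0      # dividing by one is the identity
```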
1cf508199cbd0764682bf5bdcd24b98f81e53734 | 1,151 | py | Python | tonks/vision/helpers.py | vanderveld/tonks | e87afbd9614b276b443b4a7527fd1fda01a8be4c | [
"BSD-3-Clause"
] | null | null | null | tonks/vision/helpers.py | vanderveld/tonks | e87afbd9614b276b443b4a7527fd1fda01a8be4c | [
"BSD-3-Clause"
] | null | null | null | tonks/vision/helpers.py | vanderveld/tonks | e87afbd9614b276b443b4a7527fd1fda01a8be4c | [
"BSD-3-Clause"
] | null | null | null | from creevey.ops.image import centercrop
import numpy as np
from PIL import Image
import torch.nn as nn
def center_crop_pil_image(img):
"""
Helper function to crop the center out of images.
Utilizes the centercrop function from `creevey`
Parameters
----------
img: array
PIL image array
Returns
-------
PIL.Image: Slice of input image corresponding to a cropped area around the center
"""
img = np.array(img)
cropped_img = centercrop(img, reduction_factor=.4)
return Image.fromarray(cropped_img)
class _Identity(nn.Module):
"""
Used to pass penultimate layer features to the the ensemble
Motivation for this is that the features from the penultimate layer
are likely more informative than the 1000 way softmax that was used
in the multi_output_model_v2.
"""
| 24.489362 | 85 | 0.638575 | from creevey.ops.image import centercrop
import numpy as np
from PIL import Image
import torch.nn as nn
def center_crop_pil_image(img):
"""
Helper function to crop the center out of images.
Utilizes the centercrop function from `creevey`
Parameters
----------
img: array
PIL image array
Returns
-------
PIL.Image: Slice of input image corresponding to a cropped area around the center
"""
img = np.array(img)
cropped_img = centercrop(img, reduction_factor=.4)
return Image.fromarray(cropped_img)
class _Identity(nn.Module):
"""
Used to pass penultimate layer features to the the ensemble
Motivation for this is that the features from the penultimate layer
are likely more informative than the 1000 way softmax that was used
in the multi_output_model_v2.
"""
def __init__(self):
super().__init__()
def forward(self, x):
return x
def _dense_block(in_f, out_f, reg):
return nn.Sequential(nn.Linear(in_f, out_f),
nn.BatchNorm1d(out_f, eps=reg),
nn.ReLU()
)
| 224 | 0 | 76 |
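A short sketch of these helpers in use (the image path is hypothetical; `creevey`, `Pillow`, and `torch` must be installed):

```python
from PIL import Image

img = Image.open('photo.jpg')         # hypothetical input image
cropped = center_crop_pil_image(img)  # keeps the central region (reduction_factor=.4)
print(img.size, '->', cropped.size)

# _Identity is meant to replace a classifier head so the network emits
# penultimate-layer features instead of class scores, e.g. (illustrative):
#   model.fc = _Identity()
head = _dense_block(in_f=512, out_f=256, reg=1e-5)  # Linear -> BatchNorm1d -> ReLU
```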
711d9ac3055874d62d06bdb20403fb0a203b8348 | 8,608 | py | Python | cornflow_client/schema/manager.py | baobabsoluciones/cornflow-client | f9996f0b841885d26639cb63c8ba6090387de57f | [
"MIT"
] | 3 | 2021-05-12T11:21:26.000Z | 2022-02-22T19:23:46.000Z | cornflow_client/schema/manager.py | baobabsoluciones/cornflow-client | f9996f0b841885d26639cb63c8ba6090387de57f | [
"MIT"
] | 17 | 2021-03-14T17:09:46.000Z | 2022-02-28T19:12:37.000Z | cornflow_client/schema/manager.py | baobabsoluciones/cornflow-client | f9996f0b841885d26639cb63c8ba6090387de57f | [
"MIT"
] | 2 | 2020-10-03T20:00:19.000Z | 2022-03-24T11:52:22.000Z | """
Class to help create and manage data schema and to validate json files.
"""
from jsonschema import Draft7Validator
from copy import deepcopy
from genson import SchemaBuilder
from .dictSchema import DictSchema
from cornflow_client.core.tools import load_json, save_json
class SchemaManager:
"""
A schema manager between json-schema, dict-schema and marshmallow
"""
def __init__(self, schema, validator=Draft7Validator):
"""
Class to help create and manage data schema.
Once a schema is loaded, allow the validation of data.
:param schema: a json schema
"""
self.validator = validator
self.jsonschema = schema
@classmethod
def from_filepath(cls, path):
"""
Load a json schema from a json file.
:param path the file path
return The SchemaManager instance
"""
schema = cls.load_json(path)
return cls(schema)
def get_jsonschema(self):
"""
Return a copy of the stored jsonschema.
"""
return deepcopy(self.jsonschema)
def get_validation_errors(self, data):
"""
Validate json data according to the loaded jsonschema and return a list of errors.
Return an empty list if data is valid.
:param dict data: data to validate.
:return: A list of validation errors.
For more details about the error format, see:
https://python-jsonschema.readthedocs.io/en/latest/errors/#jsonschema.exceptions.ValidationError
"""
v = self.validator(self.get_jsonschema())
if not v.is_valid(data):
error_list = [e for e in v.iter_errors(data)]
return error_list
return []
def validate_data(self, data, print_errors=False):
"""
Validate json data according to the loaded jsonschema.
:param dict data: the data to validate.
:param bool print_errors: If true, will print the errors.
:return: True if data format is valid, else False.
"""
errors_list = self.get_validation_errors(data)
if print_errors:
for e in errors_list:
print(e)
return len(errors_list) == 0
def get_file_errors(self, path):
"""
Get json file errors according to the loaded jsonschema.
:param path the file path
:return: A list of validation errors.
For more details about the error format, see:
https://python-jsonschema.readthedocs.io/en/latest/errors/#jsonschema.exceptions.ValidationError
"""
data = self.load_json(path)
return self.get_validation_errors(data)
def validate_file(self, path, print_errors=False):
"""
Validate a json file according to the loaded jsonschema.
:param path the file path
:param print_errors: If true, will print the errors.
:return: True if the data is valid and False if it is not.
"""
data = self.load_json(path)
return self.validate_data(data, print_errors=print_errors)
def to_dict_schema(self):
"""
Transform a jsonschema into a dictionary format
:return: The schema dictionary
"""
return self.to_schema_dict_obj().get_schema()
def to_schema_dict_obj(self):
"""
Returns an DictSchema object equivalent of the jsonschema
"""
return DictSchema(self.get_jsonschema())
@property
def to_marshmallow(self):
"""
Create marshmallow schemas
:return: a dict containing the flask marshmallow schemas
:rtype: Schema()
"""
return self.to_schema_dict_obj().to_marshmallow()
def export_schema_dict(self, path):
"""
Print the schema_dict in a json file.
:param path: the path where to save the dict.format
:return: nothing
"""
self.save_json(self.to_dict_schema(), path)
def draft_schema_from(self, path, save_path=None):
"""
Create a draft jsonschema from a json file of data.
:param path: path to the json file.
:param save_path: path where to save the generated schema.
:return: the generated schema.
"""
file = self.load_json(path)
builder = SchemaBuilder()
builder.add_schema({"type": "object", "properties": {}})
builder.add_object(file)
draft_schema = builder.to_json()
if save_path is not None:
with open(save_path, "w") as outfile:
outfile.write(draft_schema)
return draft_schema
def to_template(self):
"""
This function assumes certain structure for the jsonschema.
For now, three types of tables exist: array of objects, arrays and objects.
{
table1: [{col1: a, col2: b}, {col1: aa, col2: bb}, ...],
table2: [1, 2, 3, ],
table3: {config1: a, config2: b},
}
"""
master_table_name = "_README"
type_table_name = "_TYPES"
tables = {master_table_name: [], type_table_name: []}
# we update the master table of tables:
real_props = [
(k, v)
for (k, v) in self.jsonschema["properties"].items()
if not k.startswith("$")
]
for key, value in real_props:
tables[master_table_name].append(
dict(table=key, description=value.get("description", ""))
)
# then we get each table
for key, value in real_props:
tables[key] = self._get_table(value)
# then we get column types
example_inv = {1: "integer", "string": "string"}
for key, value in real_props:
rows = []
if len(tables[key]) > 1:
rows = [
dict(table=key, column=v, type="string") for v in ["key", "value"]
]
if len(tables[key]) == 1:
row1 = tables[key][0]
rows = [
dict(table=key, column=k, type=example_inv[v])
for k, v in row1.items()
]
tables[type_table_name].extend(rows)
return tables
@staticmethod
@staticmethod
def load_json(path):
"""
Load a json file
:param path: the path of the json file.json
return the json content.
"""
return load_json(path)
@staticmethod
"""
Aliases:
"""
dict_to_flask = to_marshmallow
load_schema = from_filepath
jsonschema_to_flask = to_marshmallow
jsonschema_to_dict = to_dict_schema
| 31.188406 | 106 | 0.580507 | """
Class to help create and manage data schema and to validate json files.
"""
from jsonschema import Draft7Validator
from copy import deepcopy
from genson import SchemaBuilder
from .dictSchema import DictSchema
from cornflow_client.core.tools import load_json, save_json
class SchemaManager:
"""
A schema manager between json-schema, dict-schema and marshmallow
"""
def __init__(self, schema, validator=Draft7Validator):
"""
Class to help create and manage data schema.
Once a schema is loaded, allow the validation of data.
:param schema: a json schema
"""
self.validator = validator
self.jsonschema = schema
@classmethod
def from_filepath(cls, path):
"""
Load a json schema from a json file.
:param path the file path
return The SchemaManager instance
"""
schema = cls.load_json(path)
return cls(schema)
def get_jsonschema(self):
"""
Return a copy of the stored jsonschema.
"""
return deepcopy(self.jsonschema)
def get_validation_errors(self, data):
"""
Validate json data according to the loaded jsonschema and return a list of errors.
Return an empty list if data is valid.
:param dict data: data to validate.
:return: A list of validation errors.
For more details about the error format, see:
https://python-jsonschema.readthedocs.io/en/latest/errors/#jsonschema.exceptions.ValidationError
"""
v = self.validator(self.get_jsonschema())
if not v.is_valid(data):
error_list = [e for e in v.iter_errors(data)]
return error_list
return []
def validate_data(self, data, print_errors=False):
"""
Validate json data according to the loaded jsonschema.
:param dict data: the data to validate.
:param bool print_errors: If true, will print the errors.
:return: True if data format is valid, else False.
"""
errors_list = self.get_validation_errors(data)
if print_errors:
for e in errors_list:
print(e)
return len(errors_list) == 0
def get_file_errors(self, path):
"""
Get json file errors according to the loaded jsonschema.
:param path the file path
:return: A list of validation errors.
For more details about the error format, see:
https://python-jsonschema.readthedocs.io/en/latest/errors/#jsonschema.exceptions.ValidationError
"""
data = self.load_json(path)
return self.get_validation_errors(data)
def validate_file(self, path, print_errors=False):
"""
Validate a json file according to the loaded jsonschema.
:param path the file path
:param print_errors: If true, will print the errors.
:return: True if the data is valid and False if it is not.
"""
data = self.load_json(path)
return self.validate_data(data, print_errors=print_errors)
def to_dict_schema(self):
"""
Transform a jsonschema into a dictionary format
:return: The schema dictionary
"""
return self.to_schema_dict_obj().get_schema()
def to_schema_dict_obj(self):
"""
Returns an DictSchema object equivalent of the jsonschema
"""
return DictSchema(self.get_jsonschema())
@property
def schema_dict(self):
return self.to_dict_schema()
def to_marshmallow(self):
"""
Create marshmallow schemas
:return: a dict containing the flask marshmallow schemas
:rtype: Schema()
"""
return self.to_schema_dict_obj().to_marshmallow()
def export_schema_dict(self, path):
"""
Print the schema_dict in a json file.
:param path: the path where to save the dict.format
:return: nothing
"""
self.save_json(self.to_dict_schema(), path)
def draft_schema_from(self, path, save_path=None):
"""
Create a draft jsonschema from a json file of data.
:param path: path to the json file.
:param save_path: path where to save the generated schema.
:return: the generated schema.
"""
file = self.load_json(path)
builder = SchemaBuilder()
builder.add_schema({"type": "object", "properties": {}})
builder.add_object(file)
draft_schema = builder.to_json()
if save_path is not None:
with open(save_path, "w") as outfile:
outfile.write(draft_schema)
return draft_schema
def to_template(self):
"""
This function assumes certain structure for the jsonschema.
For now, three types of tables exist: array of objects, arrays and objects.
{
table1: [{col1: a, col2: b}, {col1: aa, col2: bb}, ...],
table2: [1, 2, 3, ],
table3: {config1: a, config2: b},
}
"""
master_table_name = "_README"
type_table_name = "_TYPES"
tables = {master_table_name: [], type_table_name: []}
# we update the master table of tables:
real_props = [
(k, v)
for (k, v) in self.jsonschema["properties"].items()
if not k.startswith("$")
]
for key, value in real_props:
tables[master_table_name].append(
dict(table=key, description=value.get("description", ""))
)
# then we get each table
for key, value in real_props:
tables[key] = self._get_table(value)
# then we get column types
example_inv = {1: "integer", "string": "string"}
for key, value in real_props:
rows = []
if len(tables[key]) > 1:
rows = [
dict(table=key, column=v, type="string") for v in ["key", "value"]
]
if len(tables[key]) == 1:
row1 = tables[key][0]
rows = [
dict(table=key, column=k, type=example_inv[v])
for k, v in row1.items()
]
tables[type_table_name].extend(rows)
return tables
@staticmethod
def _get_table(contents):
example = dict(integer=1, string="string")
# several cases here:
if contents["type"] == "object":
# two columns: key-value in two columns
properties = contents["properties"]
return [
dict(key=k, value=example[v["type"]]) for k, v in properties.items()
]
# we're here, we're probably in an array
assert contents["type"] == "array"
items = contents["items"]
if items["type"] != "object":
# only one column with name
return [example[items["type"]]]
# here is a regular table:
props = items["properties"]
# if there are array of single values, we flatten them into one column:
p_arrays = {
k: v["items"]
for k, v in props.items()
if v["type"] == "array" and v["items"]["type"] != "object"
}
# if a column is an array of objects: we flatten the object into several columns
p_arrays_objects = {
"{}.{}".format(k, kk): vv["items"]
for k, v in props.items()
if v["type"] == "array" and v["items"]["type"] == "object"
for kk, vv in v["items"]["properties"].items()
}
# the rest of columns stay the same
p_no_array = {k: v for k, v in props.items() if v["type"] != "array"}
props = {**p_arrays, **p_no_array, **p_arrays_objects}
required = items["required"]
rm_keys = props.keys() - set(required)
# order is: first required in order, then the rest:
one_line = {k: example[props[k]["type"]] for k in required}
for k in rm_keys:
one_line[k] = example[props[k]["type"]]
return [one_line]
@staticmethod
def load_json(path):
"""
Load a json file
:param path: the path of the json file.json
return the json content.
"""
return load_json(path)
@staticmethod
def save_json(data, path):
return save_json(data, path)
"""
Aliases:
"""
dict_to_flask = to_marshmallow
load_schema = from_filepath
jsonschema_to_flask = to_marshmallow
jsonschema_to_dict = to_dict_schema
| 1,835 | 0 | 78 |
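A short sketch of the `SchemaManager` round trip (the schema and data below are hypothetical):

```python
schema = {
    "type": "object",
    "properties": {
        "jobs": {
            "description": "list of jobs",
            "type": "array",
            "items": {
                "type": "object",
                "properties": {"id": {"type": "integer"}},
                "required": ["id"],
            },
        }
    },
}
sm = SchemaManager(schema)

assert sm.validate_data({"jobs": [{"id": 1}, {"id": 2}]})
assert not sm.validate_data({"jobs": [{"id": "oops"}]}, print_errors=True)

tables = sm.to_template()  # {'_README': [...], '_TYPES': [...], 'jobs': [{'id': 1}]}
```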
0048849928d11e99a79abed4f15fdfd4ce533927 | 1,461 | py | Python | src/summary/trigger.py | latonaio/template-matching-summary-server | 2f0e792d38f897a08d6f792012dbfdad6d7c695b | [
"MIT"
] | 9 | 2021-09-22T07:17:02.000Z | 2021-11-05T01:26:19.000Z | src/summary/trigger.py | latonaio/template-matching-summary-server | 2f0e792d38f897a08d6f792012dbfdad6d7c695b | [
"MIT"
] | null | null | null | src/summary/trigger.py | latonaio/template-matching-summary-server | 2f0e792d38f897a08d6f792012dbfdad6d7c695b | [
"MIT"
] | null | null | null | from src import log
from src.summary.base import BaseSummary
if __name__ == "__main__":
    import json
    with open('json/C_TemplateMatchingSummary.json') as f:
        _dict = json.load(f)
    dicts = _dict['metadata'][0]['value']
    summary = TriggerSummary()
    should_be_reset = summary.should_be_reset(dicts)
    log.print(should_be_reset)
    if should_be_reset:
        summary.reset()
    summary.set(dicts)
    trigger = summary.get_trigger()
    log.print(trigger)
    end = summary.get_end()
    log.print(end)
    metadata = summary.get_metadata()
    log.print(metadata)
    summary.stack()
| 22.828125 | 93 | 0.603696 | from src import log
from src.summary.base import BaseSummary
class TriggerSummary(BaseSummary):
    def __init__(self):
        self.vehicle_name = None
        self.reset()
    def reset(self):
        super().reset()
        return
    def should_be_reset(self, dicts):
        new_vehicle_name = None
        if dicts[0]['templates']:
            new_vehicle_name = dicts[0]['templates'][0]['vehicle_name']
        if (self.vehicle_name or new_vehicle_name) and self.vehicle_name != new_vehicle_name:
            self.vehicle_name = new_vehicle_name
            return True
        return False
    def get_trigger(self):
        res = {
            'status': False,
            'values': []
        }
        if len(self._template_dicts) == 0:
            return res
        # status
        res['status'] = True
        # values
        res['values'] = self._template_dicts
        return res
if __name__ == "__main__":
    import json
    with open('json/C_TemplateMatchingSummary.json') as f:
        _dict = json.load(f)
    dicts = _dict['metadata'][0]['value']
    summary = TriggerSummary()
    should_be_reset = summary.should_be_reset(dicts)
    log.print(should_be_reset)
    if should_be_reset:
        summary.reset()
    summary.set(dicts)
    trigger = summary.get_trigger()
    log.print(trigger)
    end = summary.get_end()
    log.print(end)
    metadata = summary.get_metadata()
    log.print(metadata)
    summary.stack()
| 701 | 13 | 130 |
8ab448056412da458a047ede6337a58d5e53cdfb | 195 | py | Python | thrift/protocol/__init__.py | getlove555/getbotline | 639e157495849e12ac7dd4bae6012841cf511892 | [
"MIT"
] | 7 | 2020-04-30T09:03:36.000Z | 2021-02-21T17:45:35.000Z | thrift/protocol/__init__.py | getlove555/getbotline | 639e157495849e12ac7dd4bae6012841cf511892 | [
"MIT"
] | 4 | 2020-08-01T10:10:14.000Z | 2021-01-03T00:55:05.000Z | thrift/protocol/__init__.py | LOUREN03/lourenelle | 5448a8634d438f35df98e43ad135f232cf74d2b1 | [
"MIT"
] | 20 | 2020-05-11T08:53:30.000Z | 2021-07-16T09:50:20.000Z | #LICENCE : http://www.apache.org/licenses/LICENSE-2.0
#CREATOR BY : PRANKBOT
#MOD BY ACIL
__all__ = ['fastbinary', 'TBase', 'TBinaryProtocol', 'TCompactProtocol', 'TJSONProtocol', 'TProtocol']
| 39 | 102 | 0.723077 | #LICENCE : http://www.apache.org/licenses/LICENSE-2.0
#CREATOR BY : PRANKBOT
#MOD BY ACIL
__all__ = ['fastbinary', 'TBase', 'TBinaryProtocol', 'TCompactProtocol', 'TJSONProtocol', 'TProtocol']
| 0 | 0 | 0 |
63eaa450af3c58a5dba398f2348a71fa45fd20c0 | 2,693 | py | Python | navigation_events.py | gkovacs/invideo-quizzes-analysis-las2016 | 6ec8686ef0d3ffa5e994f8dec41590fea87e9539 | [
"MIT"
] | null | null | null | navigation_events.py | gkovacs/invideo-quizzes-analysis-las2016 | 6ec8686ef0d3ffa5e994f8dec41590fea87e9539 | [
"MIT"
] | null | null | null | navigation_events.py | gkovacs/invideo-quizzes-analysis-las2016 | 6ec8686ef0d3ffa5e994f8dec41590fea87e9539 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# md5: a2cb6a826f222fa843ab488ae8a2de22
# coding: utf-8
import json
| 29.593407 | 74 | 0.680654 | #!/usr/bin/env python
# md5: a2cb6a826f222fa843ab488ae8a2de22
# coding: utf-8
import json
# NOTE: DataException is raised below but was never defined or imported in the
# original file; a minimal definition is added so the module is importable.
class DataException(Exception):
    pass
class SeekEvent:
def __init__(self, timestamp, start, end, paused, user):
self.timestamp = timestamp
self.start = start
self.end = end
self.event_type = 'seeked'
self.paused = bool(paused)
if self.end >= self.start:
self.direction = 'forward'
else:
self.direction = 'back'
self.user = user
def __str__(self):
return json.dumps(self.__dict__)
def __repr__(self):
return str(self)
class SeekChain:
def __init__(self, seek_events):
assert len(seek_events) > 0
self.seek_events = seek_events
self.start = seek_events[0].start
self.end = seek_events[-1].end
self.event_type = 'seek_chain'
if self.end >= self.start:
self.direction = 'forward'
else:
self.direction = 'back'
self.timestamp = seek_events[0].timestamp
self.timestamp_end = seek_events[-1].timestamp
self.user = seek_events[0].user
def __str__(self):
output = {}
for k,v in self.__dict__.items():
output[k] = v
output['seek_events'] = [x.__dict__ for x in output['seek_events']]
return json.dumps(output)
def __repr__(self):
return str(self)
class PlayEvent:
def __init__(self, timestamp, start, playback_rate):
self.timestamp = timestamp
self.start = start
self.playback_rate = playback_rate
self.event_type = 'play'
class PlaySpan:
def __init__(self, timestamp, timestamp_end, start, end, playback_rate):
self.timestamp = timestamp
self.timestamp_end = timestamp_end
self.start = start
self.end = end
self.playback_rate = playback_rate
self.event_type = 'play_span'
if self.timestamp == None:
raise DataException('PlaySpan timestamp cannot be None')
if self.timestamp_end == None:
raise DataException('PlaySpan timestamp_end cannot be None')
if self.start == None:
raise DataException('PlaySpan start cannot be None')
if self.end == None:
raise DataException('PlaySpan end cannot be None')
if self.playback_rate == None:
raise DataException('PlaySpan playback_rate cannot be None')
def __str__(self):
return '(' + str(self.start) + ', ' + str(self.end) + ')'
def __repr__(self):
return self.__str__()
class PauseEvent:
def __init__(self, timestamp, start):
self.timestamp = timestamp
self.start = start
self.event_type = 'pause'
class RateChangeEvent:
def __init__(self, timestamp, start, playback_rate, paused):
self.timestamp = timestamp
self.start = start
self.playback_rate = playback_rate
self.event_type = 'ratechange'
self.paused = bool(paused)
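# --- Hedged sketch (editor's addition, not part of the original file): PlaySpan
# raises DataException, which this module never defines or imports, so a minimal
# stand-in is assumed here, followed by a small usage example of the classes above.
class DataException(Exception):
  """Assumed definition: raised when a navigation-event field is missing."""
  pass

if __name__ == '__main__':
  seek = SeekEvent(timestamp=12.5, start=30.0, end=10.0, paused=False, user='u1')
  chain = SeekChain([seek])
  print(chain.direction)  # 'back', because end < start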
| 2,199 | -24 | 426 |
9930cfc3b8857f7d8d5760e9dc01072e8dcd4337 | 1,665 | py | Python | sketch n draw.py | Ayush2007A/Code-master | fafe4a020adc3f8e78c78f6b8b2b08b5c3005613 | [
"Unlicense"
] | 1 | 2021-02-05T10:29:30.000Z | 2021-02-05T10:29:30.000Z | sketch n draw.py | Ayush2007A/Code-master | fafe4a020adc3f8e78c78f6b8b2b08b5c3005613 | [
"Unlicense"
] | null | null | null | sketch n draw.py | Ayush2007A/Code-master | fafe4a020adc3f8e78c78f6b8b2b08b5c3005613 | [
"Unlicense"
] | null | null | null | import turtle
from turtle import Turtle, Screen
a = turtle.Turtle()
screen = Screen()
a.shape('turtle')
print('Welcome to sketch n draw')
print('select your color 1 and 2')
color_1 = (input('color 1: '))
color_2 = (input('color 2: '))
print('ok! your colors are ' + color_1 + (' and ') + color_2)
print("changed pad's color according to the requirments")
a.color(color_1, color_2)
print('So now you can proceed to draw')
while True:
x = (input('enter command: '))
if x == ('For()'):
y = int(input('enter the distance: '))
a.forward(y)
if x == ('size-pen()'):
z = int(input('enter new pensize: '))
a.pensize(z)
if x == ('Bac()'):
o = int(input('select distance: '))
a.backward(o)
if x == ('dir;dig()'):
ol = int(input('enter degrees: '))
olv = input('enter direction: ')
if olv == ('right'):
a.right(ol)
if olv == ('left'):
a.left(ol)
if x == ('set-cor()'):
color_1 = (input('color 1: '))
color_2 = (input('color 2: '))
a.color(color_1, color_2)
if x == ('"'):
import sys
sys.exit()
if x == ('on-mouse'):
print('sketch with mouse enabled, now you can not use any other function')
main()
| 30.833333 | 83 | 0.493093 | import turtle
from turtle import Turtle, Screen
a = turtle.Turtle()
screen = Screen()
a.shape('turtle')
print('Welcome to sketch n draw')
print('select your color 1 and 2')
color_1 = (input('color 1: '))
color_2 = (input('color 2: '))
print('ok! your colors are ' + color_1 + (' and ') + color_2)
print("changed pad's color according to the requirments")
a.color(color_1, color_2)
print('So now you can proceed to draw')
while True:
x = (input('enter command: '))
if x == ('For()'):
y = int(input('enter the distance: '))
a.forward(y)
if x == ('size-pen()'):
z = int(input('enter new pensize: '))
a.pensize(z)
if x == ('Bac()'):
o = int(input('select distance: '))
a.backward(o)
if x == ('dir;dig()'):
ol = int(input('enter degrees: '))
olv = input('enter direction: ')
if olv == ('right'):
a.right(ol)
if olv == ('left'):
a.left(ol)
if x == ('set-cor()'):
color_1 = (input('color 1: '))
color_2 = (input('color 2: '))
a.color(color_1, color_2)
if x == ('"'):
import sys
sys.exit()
if x == ('on-mouse'):
print('sketch with mouse enabled, now you can not use any other function')
def drag(x, y):
a.ondrag(None)
a.setheading(a.towards(x, y))
a.goto(x, y)
a.ondrag(drag)
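        # Editor's note (addition): drag() unbinds itself before moving so the
        # goto() animation cannot re-trigger ondrag events, then re-binds after.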
def cr():
a.clear()
def main():
turtle.listen()
a.ondrag(drag)
            turtle.onscreenclick(lambda x, y: cr(), 3)  # pass a callback; cr() was being called immediately, registering None
screen.mainloop()
main()
| 251 | 0 | 93 |
0741193fb2dfb937b0530c65ea59f7bbd0aebd09 | 1,481 | py | Python | ppln/experiment.py | Mikhail-M/ppln | 11bacc40ec6808c0b7bae0f9fb6b36b860294417 | [
"MIT"
] | null | null | null | ppln/experiment.py | Mikhail-M/ppln | 11bacc40ec6808c0b7bae0f9fb6b36b860294417 | [
"MIT"
] | null | null | null | ppln/experiment.py | Mikhail-M/ppln | 11bacc40ec6808c0b7bae0f9fb6b36b860294417 | [
"MIT"
] | null | null | null | import torch.distributed as dist
from torch.utils.data import DataLoader
from .data.transforms import make_albumentations
from .factory import make_model
from .hooks import DistSamplerSeedHook, IterTimerHook, LogBufferHook
| 26.927273 | 69 | 0.646185 | import torch.distributed as dist
from torch.utils.data import DataLoader
from .data.transforms import make_albumentations
from .factory import make_model
from .hooks import DistSamplerSeedHook, IterTimerHook, LogBufferHook
class BaseExperiment:
def __init__(self, cfg):
self.cfg = cfg
self._model = None
@property
def optimizers(self):
raise NotImplementedError
@property
def model(self):
if self._model is None:
self._model = make_model(self.cfg.model)
return self._model
def transform(self, mode):
return make_albumentations(self.cfg.transforms[mode])
def dataset(self, mode):
raise NotImplementedError
def sampler(self, mode, dataset):
raise NotImplementedError
def dataloader(self, mode):
dataset = self.dataset(mode)
sampler = self.sampler(mode, dataset)
return DataLoader(
dataset=dataset,
sampler=sampler,
batch_size=self.cfg.data.images_per_gpu,
num_workers=self.cfg.data.workers_per_gpu,
pin_memory=self.cfg.data.pin_memory,
drop_last=mode == 'train'
)
@property
def hooks(self):
        hooks = list(self.cfg['hooks'])  # copy: appending to cfg['hooks'] itself added a hook on every property access
        if dist.is_initialized():
            hooks.append(DistSamplerSeedHook())
        return hooks + [IterTimerHook(), LogBufferHook()]
@property
def work_dir(self):
return self.cfg.work_dir
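# --- Hedged sketch (editor's addition): a minimal concrete subclass showing the
# hooks BaseExperiment leaves abstract. Bodies are illustrative placeholders,
# not the library's real implementations.
class ExampleExperiment(BaseExperiment):
    @property
    def optimizers(self):
        return []  # would normally build torch optimizers from self.cfg

    def dataset(self, mode):
        raise NotImplementedError  # plug in a torch Dataset per 'train'/'val' mode

    def sampler(self, mode, dataset):
        return None  # None lets DataLoader fall back to its default sampling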
| 935 | 298 | 23 |
48c7bf8eab29ebaf195b176af688e2d1694ca5d3 | 699 | py | Python | backend_server/tests/test_pybackend_urilib.py | ismir-net/open-eval | ebb4612ff5a6c6f7b6fbc908d837857f8377d95a | [
"MIT"
] | 6 | 2016-08-09T18:34:18.000Z | 2016-08-12T01:58:05.000Z | backend_server/tests/test_pybackend_urilib.py | cosmir/openmic-annotator | ebb4612ff5a6c6f7b6fbc908d837857f8377d95a | [
"MIT"
] | 4 | 2016-08-13T14:35:00.000Z | 2016-08-13T14:37:25.000Z | backend_server/tests/test_pybackend_urilib.py | ismir-net/open-eval | ebb4612ff5a6c6f7b6fbc908d837857f8377d95a | [
"MIT"
] | null | null | null | import pytest
import pybackend.urilib as U
| 18.394737 | 39 | 0.566524 | import pytest
import pybackend.urilib as U
def test_validate():
U.validate("x:y")
with pytest.raises(ValueError):
U.validate(":x")
with pytest.raises(ValueError):
U.validate("y:")
with pytest.raises(ValueError):
U.validate(":")
with pytest.raises(ValueError):
U.validate("x:x:y")
def test_split():
assert U.split("x:y") == ("x", "y")
with pytest.raises(ValueError):
U.split(":x")
def test_join():
assert U.join("x", "y") == "x:y"
with pytest.raises(ValueError):
U.join("x")
with pytest.raises(ValueError):
U.join("x", "y", "z")
with pytest.raises(ValueError):
U.join(":x", "y")
| 583 | 0 | 69 |
60bade3775753250997ff137e10f278ae7d7d12a | 5,476 | py | Python | examples/slit_channel.py | cwaluga/singularities_dolfin | dd379f71f384717a63906fd701df542a1603b03b | [
"MIT"
] | null | null | null | examples/slit_channel.py | cwaluga/singularities_dolfin | dd379f71f384717a63906fd701df542a1603b03b | [
"MIT"
] | null | null | null | examples/slit_channel.py | cwaluga/singularities_dolfin | dd379f71f384717a63906fd701df542a1603b03b | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# coding=utf-8
"""
Example from the paper Rüde/Waluga/Wohlmuth 2013
"""
__author__ = "Christian Waluga (waluga@ma.tum.de)"
__copyright__ = "Copyright (c) 2013 %s" % __author__
from dolfin import *
from energy_correction.correction import *
from energy_correction.meshtools import *
from energy_correction.singular import *
from energy_correction.extrapolate import *
import math
set_log_level(ERROR)
# convergence rates
# boundary definition for error
if __name__ == '__main__':
# main program
parameters["allow_extrapolation"] = True
maxlevel = 7 # maximum level (this is where 'exact' solution is computed)
compute = False # does the 'exact' solution need to be computed?
gzip = True # gzip the 'exact' solution once computed and saved?
correct = False # do we want energy-correction
mesh_filename = 'meshes/slit-channel-crossed.xml.gz'
cachedgamma = False # use cached values of gamma or recompute with method specified below?
method = 'one-level-inexact'
#method = 'two-level-inexact'
# beyond the L-shape, we have to symmetrize if we want to use only one correction per corner.
# (set to 2pi if the mesh is already symmetric at the corners)
symmetrization_threshold = 2.0*pi
output_weight = False # output the weighting function to VTK?
# find reentrant corners
mesh, corners, angles, corner_meshes \
= generate_corner_info(Mesh(mesh_filename), symmetrization_threshold)
weight = WeightingFunction(corners, angles, 4.0)
#plot(mesh); interactive()
gammas_cached = [0.27914627419934601, 0.27914440403310525, 0.27916755164797358, 0.27912214602624991, 0.27908848023107002, 0.27915828177500074, 0.2791861111329178]
if correct:
if cachedgamma:
print 'using cached gammas'
gammas = gammas_cached
else:
print 'computing gammas'
funcs = [ math.cos for c in corners ] # all Neumann
gammas = extrapolate_gammas(corners, angles, corner_meshes, method = method, \
start_at = maxlevel, extrapolation = 'richardson', \
maxlevel = maxlevel+2, funcs = funcs)[0]
else:
gammas = [0.0 for i in range(len(corners))]
print 'gammas =', gammas
# generate series of refined meshes
print 'generating meshes'
meshes = [ mesh ]
for i in xrange(maxlevel):
meshes.append(refine(meshes[-1]))
if output_weight:
File('output/weight.pvd') << interpolate(weight, FunctionSpace(meshes[3], 'Lagrange', 1))
filename = 'output/uh_fine_{0}'.format(maxlevel)
if compute:
print 'computing fine solution'
if not correct: print 'warning: computing fine solution without correction'
uh_fine = solve_problem(meshes[-1], corners, gammas)
File(filename + '.xml') << uh_fine
File(filename + '.pvd') << uh_fine
if gzip:
from subprocess import call
call(['gzip', '-f', filename + '.xml'])
else:
print 'loading fine solution'
V_fine = FunctionSpace(meshes[-1], 'Lagrange', 1)
    uh_fine = Function(V_fine, filename + ('.xml.gz' if gzip else '.xml'))  # parenthesized: the bare conditional yielded '' when gzip was off
errors = []
print 'solving'
# perform a convergence study
for i, mesh in enumerate(meshes[:-2]):
uh = solve_problem(mesh, corners, gammas)
intorder, intmesh = 1, meshes[i+2]
U = project(uh_fine, FunctionSpace(intmesh, 'Lagrange', intorder))
File('output/uh-{0}.pvd'.format(i)) << uh
right_boundary = RightBoundary()
boundaries = FacetFunction("size_t", intmesh)
boundaries.set_all(0)
right_boundary.mark(boundaries, 1)
dG = Measure("ds")[boundaries]
#h = CellSize(mesh)
h = Constant(mesh.hmin())
# compute errors between current level and highest level solutions
err1 = sqrt(assemble((uh - U)**2*dx, mesh = intmesh))
err2 = sqrt(assemble(weight*(uh - U)**2*dx, mesh = intmesh))
#err2 = sqrt(assemble(h*(Dn(uh) - Dn(U))**2*dG(1), mesh = intmesh))
errors.append((err1, err2))
print_rates(errors)
| 30.764045 | 164 | 0.649379 | #! /usr/bin/env python
# coding=utf-8
"""
Example from the paper Rüde/Waluga/Wohlmuth 2013
"""
__author__ = "Christian Waluga (waluga@ma.tum.de)"
__copyright__ = "Copyright (c) 2013 %s" % __author__
from dolfin import *
from energy_correction.correction import *
from energy_correction.meshtools import *
from energy_correction.singular import *
from energy_correction.extrapolate import *
import math
set_log_level(ERROR)
def solve_problem(mesh, corners, gammas):
# right hand side
f = Constant(0.0)
# correct coefficient
V0 = FunctionSpace(mesh, 'DG', 0)
k = interpolate(Constant(1.0), V0)
k = correct_corner_stiffness(k, mesh, corners, gammas)
#plot(k)
# variational form
V = FunctionSpace(mesh, 'Lagrange', 1)
u = TrialFunction(V)
v = TestFunction(V)
a = k*inner(grad(u), grad(v))*dx
L = f*v*dx
# boundary conditions
#bcs = [ DirichletBC(V, Expression('1.0', c = 0.25), 'on_boundary && (x[0] == -2.0)'), \
# DirichletBC(V, Expression('0.0', c = 0.25), 'on_boundary && (x[0] == +2.0)') ]
bcs = [ DirichletBC(V, Expression('1.0+c*cos(pi*x[1])', c = 0.25), 'on_boundary && (x[0] == -2.0)'), \
DirichletBC(V, Expression('0.0+c*cos(pi*x[1])', c = 0.25), 'on_boundary && (x[0] == +2.0)') ]
# solve the variational problem
uh = Function(V)
print 'problem size:', V.dim()
solver_parameters = { \
"linear_solver": "bicgstab", \
"preconditioner": "petsc_amg" }
solve(a == L, uh, bcs, solver_parameters = solver_parameters)
return uh
# convergence rates
def print_rates(E):
from math import log as lg
print '%.4e - %.4e - ' \
%(E[0][0], E[0][1])
for i in range(1, len(E)):
r1 = lg(E[i-1][0]/E[i][0], 2)
r2 = lg(E[i-1][1]/E[i][1], 2)
print '%.4e %.2f %.4e %.2f' \
%(E[i][0], r1, E[i][1], r2)
print '\n'
# boundary definition for error
class RightBoundary(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[0], 2.0)
if __name__ == '__main__':
# main program
parameters["allow_extrapolation"] = True
maxlevel = 7 # maximum level (this is where 'exact' solution is computed)
compute = False # does the 'exact' solution need to be computed?
gzip = True # gzip the 'exact' solution once computed and saved?
correct = False # do we want energy-correction
mesh_filename = 'meshes/slit-channel-crossed.xml.gz'
cachedgamma = False # use cached values of gamma or recompute with method specified below?
method = 'one-level-inexact'
#method = 'two-level-inexact'
# beyond the L-shape, we have to symmetrize if we want to use only one correction per corner.
# (set to 2pi if the mesh is already symmetric at the corners)
symmetrization_threshold = 2.0*pi
output_weight = False # output the weighting function to VTK?
# find reentrant corners
mesh, corners, angles, corner_meshes \
= generate_corner_info(Mesh(mesh_filename), symmetrization_threshold)
weight = WeightingFunction(corners, angles, 4.0)
#plot(mesh); interactive()
gammas_cached = [0.27914627419934601, 0.27914440403310525, 0.27916755164797358, 0.27912214602624991, 0.27908848023107002, 0.27915828177500074, 0.2791861111329178]
if correct:
if cachedgamma:
print 'using cached gammas'
gammas = gammas_cached
else:
print 'computing gammas'
funcs = [ math.cos for c in corners ] # all Neumann
gammas = extrapolate_gammas(corners, angles, corner_meshes, method = method, \
start_at = maxlevel, extrapolation = 'richardson', \
maxlevel = maxlevel+2, funcs = funcs)[0]
else:
gammas = [0.0 for i in range(len(corners))]
print 'gammas =', gammas
# generate series of refined meshes
print 'generating meshes'
meshes = [ mesh ]
for i in xrange(maxlevel):
meshes.append(refine(meshes[-1]))
if output_weight:
File('output/weight.pvd') << interpolate(weight, FunctionSpace(meshes[3], 'Lagrange', 1))
filename = 'output/uh_fine_{0}'.format(maxlevel)
if compute:
print 'computing fine solution'
if not correct: print 'warning: computing fine solution without correction'
uh_fine = solve_problem(meshes[-1], corners, gammas)
File(filename + '.xml') << uh_fine
File(filename + '.pvd') << uh_fine
if gzip:
from subprocess import call
call(['gzip', '-f', filename + '.xml'])
else:
print 'loading fine solution'
V_fine = FunctionSpace(meshes[-1], 'Lagrange', 1)
    uh_fine = Function(V_fine, filename + ('.xml.gz' if gzip else '.xml'))  # parenthesized: the bare conditional yielded '' when gzip was off
errors = []
print 'solving'
# perform a convergence study
for i, mesh in enumerate(meshes[:-2]):
uh = solve_problem(mesh, corners, gammas)
intorder, intmesh = 1, meshes[i+2]
U = project(uh_fine, FunctionSpace(intmesh, 'Lagrange', intorder))
File('output/uh-{0}.pvd'.format(i)) << uh
right_boundary = RightBoundary()
boundaries = FacetFunction("size_t", intmesh)
boundaries.set_all(0)
right_boundary.mark(boundaries, 1)
dG = Measure("ds")[boundaries]
#h = CellSize(mesh)
h = Constant(mesh.hmin())
# compute errors between current level and highest level solutions
err1 = sqrt(assemble((uh - U)**2*dx, mesh = intmesh))
err2 = sqrt(assemble(weight*(uh - U)**2*dx, mesh = intmesh))
#err2 = sqrt(assemble(h*(Dn(uh) - Dn(U))**2*dG(1), mesh = intmesh))
errors.append((err1, err2))
print_rates(errors)
| 1,394 | 10 | 91 |
a7c5fe548eda7fb588f956399f68c6d16da1faec | 1,314 | py | Python | ca_on_wellesley/people.py | dcycle/scrapers-ca | 4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f | [
"MIT"
] | 19 | 2015-05-26T03:18:50.000Z | 2022-01-31T03:27:41.000Z | ca_on_wellesley/people.py | dcycle/scrapers-ca | 4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f | [
"MIT"
] | 119 | 2015-01-09T06:09:35.000Z | 2022-01-20T23:05:05.000Z | ca_on_wellesley/people.py | dcycle/scrapers-ca | 4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f | [
"MIT"
] | 17 | 2015-11-23T05:00:10.000Z | 2021-09-15T16:03:33.000Z | from utils import CanadianScraper, CanadianPerson as Person
import re
COUNCIL_PAGE = 'http://www.wellesley.ca/council/councillors/?q=council/councillors'
| 34.578947 | 95 | 0.571537 | from utils import CanadianScraper, CanadianPerson as Person
import re
COUNCIL_PAGE = 'http://www.wellesley.ca/council/councillors/?q=council/councillors'
def post_number(name):
return {
'Ward One': 'Ward 1',
'Ward Two': 'Ward 2',
'Ward Three': 'Ward 3',
'Ward Four': 'Ward 4'
}[name]
class WellesleyPersonScraper(CanadianScraper):
def scrape(self):
page = self.lxmlize(COUNCIL_PAGE)
members = [el for el in page.xpath('//div[@id="printAreaContent"]//td') if
el.text_content().strip().lower().split()[0] in ["mayor", "councillor"]][1:]
assert len(members) == 5
for member in members:
position = member.text_content().split()[0]
srch = re.search(r'\w+(.+?) is.*? for (.+?)\.', member.text_content().strip())
name = srch.group(1).strip()
district = srch.group(2).strip()
phone = self.get_phone(member)
if position == "Mayor":
district = "Wellesley"
else:
district = post_number(district)
p = Person(primary_org='legislature', name=name, district=district, role=position)
p.add_contact('voice', phone, 'legislature')
p.add_source(COUNCIL_PAGE)
yield p
| 1,060 | 25 | 72 |
fb53dcf3b5f85da5350281d96e99a77c6877114e | 1,261 | py | Python | api/client/src/test/test_update_cluster_bad_request_exception_response_content.py | maclema/aws-parallelcluster | ade6e5e76201ee43c6e222fcd1c2891aba938838 | [
"Apache-2.0"
] | 415 | 2018-11-13T15:02:15.000Z | 2022-03-31T15:26:06.000Z | api/client/src/test/test_update_cluster_bad_request_exception_response_content.py | maclema/aws-parallelcluster | ade6e5e76201ee43c6e222fcd1c2891aba938838 | [
"Apache-2.0"
] | 2,522 | 2018-11-13T16:16:27.000Z | 2022-03-31T13:57:10.000Z | api/client/src/test/test_update_cluster_bad_request_exception_response_content.py | yuleiwan/aws-parallelcluster | aad2a3019ef4ad08d702f5acf41b152b3f7a0b46 | [
"Apache-2.0"
] | 164 | 2018-11-14T22:47:46.000Z | 2022-03-22T11:33:22.000Z | """
ParallelCluster
ParallelCluster API # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import pcluster.client
from pcluster.client.model.change import Change
from pcluster.client.model.config_validation_message import ConfigValidationMessage
from pcluster.client.model.update_error import UpdateError
globals()['Change'] = Change
globals()['ConfigValidationMessage'] = ConfigValidationMessage
globals()['UpdateError'] = UpdateError
from pcluster.client.model.update_cluster_bad_request_exception_response_content import UpdateClusterBadRequestExceptionResponseContent
class TestUpdateClusterBadRequestExceptionResponseContent(unittest.TestCase):
"""UpdateClusterBadRequestExceptionResponseContent unit test stubs"""
def testUpdateClusterBadRequestExceptionResponseContent(self):
"""Test UpdateClusterBadRequestExceptionResponseContent"""
# FIXME: construct object with mandatory attributes with example values
# model = UpdateClusterBadRequestExceptionResponseContent() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 30.02381 | 135 | 0.77954 | """
ParallelCluster
ParallelCluster API # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import pcluster.client
from pcluster.client.model.change import Change
from pcluster.client.model.config_validation_message import ConfigValidationMessage
from pcluster.client.model.update_error import UpdateError
globals()['Change'] = Change
globals()['ConfigValidationMessage'] = ConfigValidationMessage
globals()['UpdateError'] = UpdateError
from pcluster.client.model.update_cluster_bad_request_exception_response_content import UpdateClusterBadRequestExceptionResponseContent
class TestUpdateClusterBadRequestExceptionResponseContent(unittest.TestCase):
"""UpdateClusterBadRequestExceptionResponseContent unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUpdateClusterBadRequestExceptionResponseContent(self):
"""Test UpdateClusterBadRequestExceptionResponseContent"""
# FIXME: construct object with mandatory attributes with example values
# model = UpdateClusterBadRequestExceptionResponseContent() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 19 | 0 | 54 |
87793261619b7cb430625fdd5dd5441808c7b619 | 2,864 | py | Python | qiskit/providers/aer/pulse/de_solvers/pulse_de_solver.py | SooluThomas/qiskit-aer | 1602b1cc35a18f5a008c38433f607c7ad0870ce9 | [
"Apache-2.0"
] | 1 | 2019-05-19T10:30:03.000Z | 2019-05-19T10:30:03.000Z | qiskit/providers/aer/pulse/de_solvers/pulse_de_solver.py | stefan-woerner/qiskit-aer | cef92153dad6e9acded627b2e3cb10c5a1d7851a | [
"Apache-2.0"
] | null | null | null | qiskit/providers/aer/pulse/de_solvers/pulse_de_solver.py | stefan-woerner/qiskit-aer | cef92153dad6e9acded627b2e3cb10c5a1d7851a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
# pylint: disable=no-value-for-parameter, invalid-name, import-error
"""Pulse DE solver for problems in qutip format."""
import numpy as np
from scipy.integrate import ode
from scipy.integrate._ode import zvode
# pylint: disable=no-name-in-module
from .pulse_utils import td_ode_rhs_static
def construct_pulse_zvode_solver(exp, op_system):
""" Constructs a scipy ODE solver for a given exp and op_system
Parameters:
exp (dict): dict containing experimental
op_system (PulseSimDescription): container for simulation information
Returns:
ode: scipy ode
"""
# extract relevant data from op_system
global_data = op_system.global_data
ode_options = op_system.ode_options
channels = dict(op_system.channels)
# Init register
register = np.ones(global_data['n_registers'], dtype=np.uint8)
ODE = ode(td_ode_rhs_static)
ODE.set_f_params(global_data, exp, op_system.system, channels, register)
ODE._integrator = qiskit_zvode(method=ode_options.method,
order=ode_options.order,
atol=ode_options.atol,
rtol=ode_options.rtol,
nsteps=ode_options.nsteps,
first_step=ode_options.first_step,
min_step=ode_options.min_step,
max_step=ode_options.max_step
)
# Forces complex ODE solving
if not ODE._y:
ODE.t = 0.0
ODE._y = np.array([0.0], complex)
ODE._integrator.reset(len(ODE._y), ODE.jac is not None)
# Since all experiments are defined to start at zero time.
ODE.set_initial_value(global_data['initial_state'], 0)
return ODE
class qiskit_zvode(zvode):
"""Modifies the stepper for ZVODE so that
it always stops at a given time in tlist;
by default, it over shoots the time.
"""
| 33.302326 | 77 | 0.641411 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
# pylint: disable=no-value-for-parameter, invalid-name, import-error
"""Pulse DE solver for problems in qutip format."""
import numpy as np
from scipy.integrate import ode
from scipy.integrate._ode import zvode
# pylint: disable=no-name-in-module
from .pulse_utils import td_ode_rhs_static
def construct_pulse_zvode_solver(exp, op_system):
""" Constructs a scipy ODE solver for a given exp and op_system
Parameters:
exp (dict): dict containing experimental
op_system (PulseSimDescription): container for simulation information
Returns:
ode: scipy ode
"""
# extract relevant data from op_system
global_data = op_system.global_data
ode_options = op_system.ode_options
channels = dict(op_system.channels)
# Init register
register = np.ones(global_data['n_registers'], dtype=np.uint8)
ODE = ode(td_ode_rhs_static)
ODE.set_f_params(global_data, exp, op_system.system, channels, register)
ODE._integrator = qiskit_zvode(method=ode_options.method,
order=ode_options.order,
atol=ode_options.atol,
rtol=ode_options.rtol,
nsteps=ode_options.nsteps,
first_step=ode_options.first_step,
min_step=ode_options.min_step,
max_step=ode_options.max_step
)
# Forces complex ODE solving
if not ODE._y:
ODE.t = 0.0
ODE._y = np.array([0.0], complex)
ODE._integrator.reset(len(ODE._y), ODE.jac is not None)
# Since all experiments are defined to start at zero time.
ODE.set_initial_value(global_data['initial_state'], 0)
return ODE
class qiskit_zvode(zvode):
"""Modifies the stepper for ZVODE so that
it always stops at a given time in tlist;
by default, it over shoots the time.
"""
def step(self, *args):
itask = self.call_args[2]
self.rwork[0] = args[4]
self.call_args[2] = 5
r = self.run(*args)
self.call_args[2] = itask
return r
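# --- Editor's note (addition): ITASK=5 asks the VODE family for "one internal
# step, but do not pass TCRIT", with TCRIT held in RWORK(1); that is why step()
# writes args[4] into self.rwork[0] and temporarily swaps call_args[2] to 5
# before restoring the original ITASK.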
| 176 | 0 | 26 |
8424167fac0970dd1983c3a928d7777a5ac90453 | 190 | py | Python | TEST/RPLCD_CharLCD.py | sireline/raspi | f3f1879d3c8cc4e702b8f0f7c5fcc4d9bf5901c1 | [
"MIT"
] | null | null | null | TEST/RPLCD_CharLCD.py | sireline/raspi | f3f1879d3c8cc4e702b8f0f7c5fcc4d9bf5901c1 | [
"MIT"
] | 1 | 2019-03-14T11:42:15.000Z | 2019-03-14T11:42:15.000Z | TEST/RPLCD_CharLCD.py | sireline/raspi | f3f1879d3c8cc4e702b8f0f7c5fcc4d9bf5901c1 | [
"MIT"
] | null | null | null | from RPLCD import CharLCD
import RPi.GPIO as GPIO
lcd = CharLCD(numbering_mode=GPIO.BOARD, cols=16, rows=2, pin_rs=37, pin_e=35, pins_data=[33, 31, 29, 23])
lcd.write_string(u'Hello World')
| 38 | 106 | 0.752632 | from RPLCD import CharLCD
import RPi.GPIO as GPIO
lcd = CharLCD(numbering_mode=GPIO.BOARD, cols=16, rows=2, pin_rs=37, pin_e=35, pins_data=[33, 31, 29, 23])
lcd.write_string(u'Hello World')
| 0 | 0 | 0 |
30ac7343bb8e51ff66295aa68294f848fe139c9b | 14,856 | py | Python | indicators/tests/iptt_tests/indicator_report_data_queries.py | mercycorps/toladata | 4d5f9b45905a81af9981b586690e020d5b3bfc60 | [
"Apache-2.0"
] | null | null | null | indicators/tests/iptt_tests/indicator_report_data_queries.py | mercycorps/toladata | 4d5f9b45905a81af9981b586690e020d5b3bfc60 | [
"Apache-2.0"
] | 268 | 2020-03-31T15:46:59.000Z | 2022-03-31T18:01:08.000Z | indicators/tests/iptt_tests/indicator_report_data_queries.py | mercycorps/toladata | 4d5f9b45905a81af9981b586690e020d5b3bfc60 | [
"Apache-2.0"
] | 1 | 2021-01-05T01:58:24.000Z | 2021-01-05T01:58:24.000Z | """Tests for the IPTT Report Data Indicators (TP/TVA) to ensure their query counts stay O(n) and not O(n^2)
- api_report_data takes program_pk and frequency, calls IPTT<TVA/TP>ReportIndicatorSerializer.load_report
- IPTT<TVA/TP>ReportIndicatorSerializer.load_report takes program_pk and frequency
- queries for program data (start/end dates)
- queries for disaggregations data ??
    - calls IPTTIndicator.tva/timeperiods
- IPTTIndicator
- get queryset adds prefetch
    - with_annotations adds lop_target, lop_actual, lop_percent_met, and old_level if necessary
    - with_disaggregation_lop_annotations takes disaggregation_category_pks and adds lop_actual for each
- with_frequency_annotations takes freq, start, end, and disaggregation_category_pks and adds
frequency_disaggregation_actual for each disaggregation and overall frequency actual
(if TVA also adds overall frequency target and percent met)
"""
from django import test
from indicators.models import Indicator, PeriodicTarget
from indicators.queries.iptt_queries import IPTTIndicator
from factories import (
indicators_models as i_factories,
workflow_models as w_factories
)
QUERIES_PREFETCH = 8
QUERIES_FREQUENCIES = QUERIES_PREFETCH + 0
QUERIES_DISAGG_FREQUENCIES = QUERIES_FREQUENCIES + 0
TP_QUERYSET = IPTTIndicator.timeperiods
TVA_QUERYSET = IPTTIndicator.tva
| 54.021818 | 117 | 0.580506 | """Tests for the IPTT Report Data Indicators (TP/TVA) to ensure their query counts stay O(n) and not O(n^2)
- api_report_data takes program_pk and frequency, calls IPTT<TVA/TP>ReportIndicatorSerializer.load_report
- IPTT<TVA/TP>ReportIndicatorSerializer.load_report takes program_pk and frequency
- queries for program data (start/end dates)
- queries for disaggregations data ??
    - calls IPTTIndicator.tva/timeperiods
- IPTTIndicator
- get queryset adds prefetch
    - with_annotations adds lop_target, lop_actual, lop_percent_met, and old_level if necessary
    - with_disaggregation_lop_annotations takes disaggregation_category_pks and adds lop_actual for each
- with_frequency_annotations takes freq, start, end, and disaggregation_category_pks and adds
frequency_disaggregation_actual for each disaggregation and overall frequency actual
(if TVA also adds overall frequency target and percent met)
"""
from django import test
from indicators.models import Indicator, PeriodicTarget
from indicators.queries.iptt_queries import IPTTIndicator
from factories import (
indicators_models as i_factories,
workflow_models as w_factories
)
QUERIES_PREFETCH = 8
QUERIES_FREQUENCIES = QUERIES_PREFETCH + 0
QUERIES_DISAGG_FREQUENCIES = QUERIES_FREQUENCIES + 0
TP_QUERYSET = IPTTIndicator.timeperiods
TVA_QUERYSET = IPTTIndicator.tva
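# --- Editor's illustration (addition): how the annotation chain described in the
# module docstring is composed in the tests below; period bounds come from the
# program's reporting dates.
#   TP_QUERYSET.filter(program=program) \
#       .with_disaggregation_lop_annotations(category_pks) \
#       .with_frequency_annotations(frequency, start, end) \
#       .with_disaggregation_frequency_annotations(
#           frequency, start, end, disaggregations=category_pks)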
class TestIPTTIndicatorQuerysetPrefetch(test.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.standard_disagg = i_factories.DisaggregationTypeFactory(
standard=True,
country=None,
disaggregation_type="Test Standard Disagg",
labels=["Test SD Label {}".format(c+1) for c in range(5)]
)
cls.country = w_factories.CountryFactory(country="TestLand", code="TL")
cls.country_disagg = i_factories.DisaggregationTypeFactory(
standard=False,
country=cls.country,
disaggregation_type="Test Country Disagg",
labels=["Test CD Label {}".format(c+1) for c in range(5)]
)
cls.category_pks = [
label.pk for disagg in [cls.standard_disagg, cls.country_disagg] for label in disagg.labels
]
cls.program = w_factories.RFProgramFactory()
cls.program.country.set([cls.country])
cls.periods = {}
for frequency in Indicator.REGULAR_TARGET_FREQUENCIES:
cls.periods[frequency] = [p for p in PeriodicTarget.generate_for_frequency(frequency)(
cls.program.reporting_period_start,
cls.program.reporting_period_end
)]
cls.indicators = []
cls.results = []
cls.disaggs = []
for frequency, _ in Indicator.TARGET_FREQUENCIES[:-1]:
indicator = i_factories.RFIndicatorFactory(
program=cls.program,
target_frequency=frequency,
targets=1000,
results=True
)
indicator.disaggregation.set([cls.standard_disagg, cls.country_disagg])
results = 0
disaggs = 0
for result in indicator.result_set.all():
results += result.achieved
disaggs += 2
for label in [label for disagg in indicator.disaggregation.all() for label in disagg.labels]:
i_factories.DisaggregatedValueFactory(
result=result,
category=label,
value=2
)
cls.indicators.append(indicator)
cls.results.append(results)
cls.disaggs.append(disaggs)
cls.period_results = [
[],
[],
[],
[[], [10,], [20,], [10,], [20,], [30,], [40,], [120,]],
[[], [10, None], [20, None], [10, None], [10, 10], [20, 10], [20, 20], [60, 60]],
[[], [10, None, None], [20, None, None], [10, None, None], [10, 10, None],
[10, 10, 10], [20, 10, 10], [40, 40, 40]]
]
def test_indicator_queryset_queries_one_indicator(self):
filters = {'pk': self.indicators[0].pk}
for queryset in [TP_QUERYSET, TVA_QUERYSET]:
with self.assertNumQueries(QUERIES_PREFETCH):
indicator = queryset.filter(**filters).first()
# has rf marked
self.assertTrue(indicator.using_results_framework)
self.assertEqual(indicator.lop_target_calculated, 1000)
self.assertEqual(indicator.lop_actual, self.results[0])
self.assertAlmostEqual(indicator.lop_percent_met, float(self.results[0])/1000)
self.assertCountEqual(indicator.disaggregation_category_pks, self.category_pks)
for queryset in [TP_QUERYSET, TVA_QUERYSET]:
with self.assertNumQueries(QUERIES_PREFETCH):
indicator = queryset.filter(**filters).with_disaggregation_lop_annotations(self.category_pks).first()
for category_pk in self.category_pks:
self.assertEqual(
getattr(indicator, f'disaggregation_{category_pk}_lop_actual'),
self.disaggs[0]
)
for frequency in Indicator.REGULAR_TARGET_FREQUENCIES:
period_count = len(self.periods[frequency])
with self.assertNumQueries(QUERIES_FREQUENCIES):
indicator = TP_QUERYSET.filter(**filters).with_disaggregation_lop_annotations(
self.category_pks
).with_frequency_annotations(
frequency, self.program.reporting_period_start, self.program.reporting_period_end,
).first()
self.assertEqual(getattr(indicator, f'frequency_{frequency}_count'), period_count)
for c in range(period_count):
self.assertEqual(
getattr(indicator, f'frequency_{frequency}_period_{c}'),
10 if c == 0 else None
)
with self.assertNumQueries(QUERIES_DISAGG_FREQUENCIES):
indicator = TP_QUERYSET.filter(**filters).with_disaggregation_lop_annotations(
self.category_pks
).with_frequency_annotations(
frequency, self.program.reporting_period_start, self.program.reporting_period_end
).with_disaggregation_frequency_annotations(
frequency, self.program.reporting_period_start, self.program.reporting_period_end,
disaggregations=self.category_pks
).first()
for c in range(period_count):
for category_pk in self.category_pks:
self.assertEqual(
getattr(indicator, f'disaggregation_{category_pk}_frequency_{frequency}_period_{c}'),
2 if c == 0 else None
)
def test_indicator_queryset_queries_two_indicators(self):
filters = {'pk__in': [self.indicators[0].pk, self.indicators[1].pk]}
for queryset in [TP_QUERYSET, TVA_QUERYSET]:
with self.assertNumQueries(QUERIES_PREFETCH):
indicators = [indicator for indicator in queryset.filter(**filters)]
for c, indicator in enumerate(indicators):
self.assertTrue(indicator.using_results_framework)
self.assertEqual(indicator.lop_target_calculated, 1000)
self.assertEqual(indicator.lop_actual, self.results[c])
self.assertAlmostEqual(indicator.lop_percent_met, float(self.results[c])/1000)
self.assertCountEqual(indicator.disaggregation_category_pks, self.category_pks)
with self.assertNumQueries(QUERIES_PREFETCH):
indicators = [indicator for indicator in queryset.filter(
**filters
).with_disaggregation_lop_annotations(self.category_pks)]
for c, indicator in enumerate(indicators):
for category_pk in self.category_pks:
self.assertEqual(
getattr(indicator, 'disaggregation_{}_lop_actual'.format(category_pk)),
self.disaggs[c]
)
for frequency in Indicator.REGULAR_TARGET_FREQUENCIES:
period_count = len(self.periods[frequency])
with self.assertNumQueries(QUERIES_FREQUENCIES):
indicators = [indicator for indicator in TP_QUERYSET.filter(
**filters
).with_disaggregation_lop_annotations(self.category_pks).with_frequency_annotations(
frequency, self.program.reporting_period_start, self.program.reporting_period_end,
)]
for indicator in indicators:
self.assertEqual(getattr(indicator, 'frequency_{}_count'.format(frequency)), period_count)
for c in range(period_count):
achieved = getattr(indicator, 'frequency_{}_period_{}'.format(frequency, c))
if frequency in [1, 2, 3, 4, 5]:
self.assertEqual(
achieved,
self.period_results[frequency][indicator.target_frequency][c]
)
with self.assertNumQueries(QUERIES_DISAGG_FREQUENCIES):
indicators = [
indicator for indicator in TP_QUERYSET.filter(**filters).with_disaggregation_lop_annotations(
self.category_pks
).with_frequency_annotations(
frequency, self.program.reporting_period_start, self.program.reporting_period_end
).with_disaggregation_frequency_annotations(
frequency, self.program.reporting_period_start, self.program.reporting_period_end,
disaggregations=self.category_pks
)]
for indicator in indicators:
for c in range(period_count):
for category_pk in self.category_pks:
d_value = getattr(
indicator, f'disaggregation_{category_pk}_frequency_{frequency}_period_{c}'
)
if frequency in [1, 2, 3, 4, 5]:
expected = self.period_results[frequency][indicator.target_frequency][c]
if expected is not None:
expected = expected/5
self.assertEqual(
d_value, expected
)
def test_indicator_queryset_queries_multiple_indicators(self):
filters = {'program': self.program}
for queryset in [TP_QUERYSET, TVA_QUERYSET]:
with self.assertNumQueries(QUERIES_PREFETCH):
indicators = [indicator for indicator in queryset.filter(**filters)]
for c, indicator in enumerate(indicators):
self.assertTrue(indicator.using_results_framework)
self.assertEqual(indicator.lop_target_calculated, 1000)
self.assertEqual(indicator.lop_actual, self.results[c])
self.assertAlmostEqual(indicator.lop_percent_met, float(self.results[c])/1000)
self.assertCountEqual(indicator.disaggregation_category_pks, self.category_pks)
with self.assertNumQueries(QUERIES_PREFETCH):
indicators = [
indicator for indicator in queryset.filter(
**filters
).with_disaggregation_lop_annotations(self.category_pks)]
for c, indicator in enumerate(indicators):
for category_pk in self.category_pks:
self.assertEqual(
getattr(indicator, 'disaggregation_{}_lop_actual'.format(category_pk)),
self.disaggs[c]
)
for frequency in Indicator.REGULAR_TARGET_FREQUENCIES:
period_count = len(self.periods[frequency])
with self.assertNumQueries(QUERIES_FREQUENCIES):
indicators = [indicator for indicator in TP_QUERYSET.filter(
**filters
).with_disaggregation_lop_annotations(self.category_pks).with_frequency_annotations(
frequency, self.program.reporting_period_start, self.program.reporting_period_end
)]
for indicator in indicators:
self.assertEqual(getattr(indicator, 'frequency_{}_count'.format(frequency)), period_count)
for c in range(period_count):
achieved = getattr(indicator, 'frequency_{}_period_{}'.format(frequency, c))
if frequency in [1, 2, 3, 4, 5]:
self.assertEqual(
achieved,
self.period_results[frequency][indicator.target_frequency][c]
)
with self.assertNumQueries(QUERIES_DISAGG_FREQUENCIES):
indicators = [indicator for indicator in TP_QUERYSET.filter(
**filters
).with_disaggregation_lop_annotations(
self.category_pks
).with_frequency_annotations(
frequency, self.program.reporting_period_start, self.program.reporting_period_end
).with_disaggregation_frequency_annotations(
frequency, self.program.reporting_period_start, self.program.reporting_period_end,
disaggregations=self.category_pks
)]
for indicator in indicators:
for c in range(period_count):
for category_pk in self.category_pks:
d_value = getattr(
indicator, f'disaggregation_{category_pk}_frequency_{frequency}_period_{c}'
)
if frequency in [1, 2, 3, 4, 5]:
expected = self.period_results[frequency][indicator.target_frequency][c]
if expected is not None:
expected = expected/5
self.assertEqual(
d_value, expected
)
| 13,250 | 158 | 23 |
5dfc3ef0fa75c74b1c61d2f529a6109260b3259f | 4,081 | py | Python | MAOL/personal_list/models.py | noxlock/my-anime-openings-list | 4ab7660f226af5f06b941c657b8ff611ec8c63bc | [
"MIT"
] | null | null | null | MAOL/personal_list/models.py | noxlock/my-anime-openings-list | 4ab7660f226af5f06b941c657b8ff611ec8c63bc | [
"MIT"
] | 4 | 2021-12-31T07:56:10.000Z | 2021-12-31T09:43:27.000Z | MAOL/personal_list/models.py | noxlock/my-anime-openings-list | 4ab7660f226af5f06b941c657b8ff611ec8c63bc | [
"MIT"
] | null | null | null | from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from home.models import ModelAbstract, Song
class Profile(ModelAbstract):
"""
A user's profile. Data like profile picture/banner is stored here,
but also anything else that should probably be extending the
default User model.
It takes hard work to set a custom User model once you've already
started a project, and most of the things I want would
fit nicely in a model like this anyways.
"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
avatar = ProcessedImageField(
upload_to='avatars', default='avatars/placeholder.png',
processors=[ResizeToFill(256, 256)],
format="PNG"
)
banner = ProcessedImageField(
upload_to='banners', default='banners/placeholder.png',
processors=[ResizeToFill(1300, 400)],
format="PNG"
)
# def get_recent_ratings(self, limit=20):
# """
# Get the user's recently rated songs.
# @limit: How many songs to get
# """
# ratings = self.user.songlist.songrating_set.all()
# songs = Song.objects.filter(songrating__in=ratings).values('video_link', 'anime__cover', 'pk').order_by(
# '-songrating__last_modified')[:limit]
# return songs
def get_top_ratings(self, limit=20):
"""
        Get the user's top-rated songs.
        Fetches only minimal details; mainly just used for carousels.
@limit: How many songs to get (default 20)
"""
# Grab all the user's ratings
ratings = self.user.songlist.songrating_set.all()
# Filter through the highest rated, and then most recently rated songs rated by the user.
songs = Song.objects.filter(songrating__in=ratings).values('video_link', 'anime__cover', 'pk').order_by(
'-songrating__rating', 'songrating__last_modified')[:limit]
return songs
def get_rated_songs(self, limit=None):
"""
Grab all songs that a User has rated.
Used for the user's list.
@limit: How many songs to get
"""
# Grab all the user's SongRatings, along with details about the song.
ratings = SongRating.objects.filter(parent_list=self.user.songlist).values(
'song__anime__cover', 'song__anime__english_name',
'song__song_type', 'song__number', 'rating', 'song__video_link', 'song__pk').order_by(
'-rating', '-last_modified'
)
return ratings
# On User creation, make a profile as well
post_save.connect(create_profile, sender=User)
class SongList(ModelAbstract):
"""
A user's list of songs
"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
# On User creation, make a song list as well
post_save.connect(create_songlist, sender=User)
class SongRating(ModelAbstract):
"""
How a user has rated a song in their list
"""
song = models.ForeignKey('home.Song', on_delete=models.CASCADE)
parent_list = models.ForeignKey(SongList, on_delete=models.CASCADE)
rating = models.PositiveIntegerField(null=True, blank=True, validators=[MaxValueValidator(10)])
| 30.684211 | 114 | 0.666503 | from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from home.models import ModelAbstract, Song
class Profile(ModelAbstract):
"""
A user's profile. Data like profile picture/banner is stored here,
but also anything else that should probably be extending the
default User model.
It takes hard work to set a custom User model once you've already
started a project, and most of the things I want would
fit nicely in a model like this anyways.
"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
avatar = ProcessedImageField(
upload_to='avatars', default='avatars/placeholder.png',
processors=[ResizeToFill(256, 256)],
format="PNG"
)
banner = ProcessedImageField(
upload_to='banners', default='banners/placeholder.png',
processors=[ResizeToFill(1300, 400)],
format="PNG"
)
# def get_recent_ratings(self, limit=20):
# """
# Get the user's recently rated songs.
# @limit: How many songs to get
# """
# ratings = self.user.songlist.songrating_set.all()
# songs = Song.objects.filter(songrating__in=ratings).values('video_link', 'anime__cover', 'pk').order_by(
# '-songrating__last_modified')[:limit]
# return songs
def get_top_ratings(self, limit=20):
"""
        Get the user's top-rated songs.
        Fetches only minimal details; mainly just used for carousels.
@limit: How many songs to get (default 20)
"""
# Grab all the user's ratings
ratings = self.user.songlist.songrating_set.all()
# Filter through the highest rated, and then most recently rated songs rated by the user.
songs = Song.objects.filter(songrating__in=ratings).values('video_link', 'anime__cover', 'pk').order_by(
'-songrating__rating', 'songrating__last_modified')[:limit]
return songs
def get_rated_songs(self, limit=None):
"""
Grab all songs that a User has rated.
Used for the user's list.
@limit: How many songs to get
"""
# Grab all the user's SongRatings, along with details about the song.
ratings = SongRating.objects.filter(parent_list=self.user.songlist).values(
'song__anime__cover', 'song__anime__english_name',
'song__song_type', 'song__number', 'rating', 'song__video_link', 'song__pk').order_by(
'-rating', '-last_modified'
)
return ratings
def __str__(self):
return self.user.username + '\'s Profile'
# On User creation, make a profile as well
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
post_save.connect(create_profile, sender=User)
class SongList(ModelAbstract):
"""
A user's list of songs
"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
def __str__(self):
return self.user.username + '\'s List'
# On User creation, make a song list as well
def create_songlist(sender, instance, created, **kwargs):
if created:
SongList.objects.create(user=instance)
post_save.connect(create_songlist, sender=User)
class SongRating(ModelAbstract):
"""
How a user has rated a song in their list
"""
song = models.ForeignKey('home.Song', on_delete=models.CASCADE)
parent_list = models.ForeignKey(SongList, on_delete=models.CASCADE)
rating = models.PositiveIntegerField(null=True, blank=True, validators=[MaxValueValidator(10)])
class Meta:
# You should only be able to have one rating per song.
constraints = [
models.UniqueConstraint(fields=['parent_list', 'song'], name='one_rating_only')
]
def __str__(self):
return str(self.rating) + ' ' + str(self.song)
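# --- Hedged usage sketch (editor's addition): update_or_create keeps a single
# rating per (parent_list, song), matching the UniqueConstraint declared above.
# SongRating.objects.update_or_create(
#     parent_list=user.songlist, song=song, defaults={'rating': 8})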
| 339 | 179 | 152 |
9afa88946cd84c21fe6fd73d3c1f70f534351283 | 192 | py | Python | backend/app/db/mongodb.py | soumitdas/xmeme | a0a39c55e1cb11f9cd3d001f88df88d2913efbd9 | [
"MIT"
] | null | null | null | backend/app/db/mongodb.py | soumitdas/xmeme | a0a39c55e1cb11f9cd3d001f88df88d2913efbd9 | [
"MIT"
] | null | null | null | backend/app/db/mongodb.py | soumitdas/xmeme | a0a39c55e1cb11f9cd3d001f88df88d2913efbd9 | [
"MIT"
] | null | null | null | from motor.motor_asyncio import AsyncIOMotorClient
db = Database() | 21.333333 | 50 | 0.776042 | from motor.motor_asyncio import AsyncIOMotorClient
class Database:
client: AsyncIOMotorClient = None
db = Database()
async def get_database() -> AsyncIOMotorClient:
return db.client | 47 | 32 | 46 |
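# --- Hedged sketch (editor's addition): startup/shutdown helpers that typically
# pair with the Database holder above; the connection URI is an assumption.
# async def connect_to_mongo():
#     db.client = AsyncIOMotorClient("mongodb://localhost:27017")
# async def close_mongo_connection():
#     db.client.close()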
1873b48a08443ccfc17f1f4b9fdf04d22a02923d | 2,785 | py | Python | shrinky/glsl_block_struct.py | xyproto/shrinky | 8f318d2f62f8ef3cffae6bd5db1b36c95067aac6 | [
"BSD-3-Clause"
] | 11 | 2019-03-16T11:03:50.000Z | 2021-12-26T12:41:57.000Z | shrinky/glsl_block_struct.py | xyproto/shrinky | 8f318d2f62f8ef3cffae6bd5db1b36c95067aac6 | [
"BSD-3-Clause"
] | 1 | 2022-02-12T16:22:37.000Z | 2022-02-12T16:22:37.000Z | shrinky/glsl_block_struct.py | xyproto/shrinky | 8f318d2f62f8ef3cffae6bd5db1b36c95067aac6 | [
"BSD-3-Clause"
] | null | null | null | from shrinky.glsl_block import GlslBlock
from shrinky.glsl_block import extract_tokens
from shrinky.glsl_block_member import glsl_parse_member_list
########################################
# GlslBlockStruct ######################
########################################
class GlslBlockStruct(GlslBlock):
"""Struct declaration."""
def __init__(self, type_name, members, name=None, size=0):
"""Constructor."""
GlslBlock.__init__(self)
self.__type_name = type_name
self.__members = members
self.__name = name
self.__size = size
self.__member_accesses = []
# Hierarchy.
        if name is not None:  # glsl_parse_struct may pass name=None for anonymous structs
            name.setType(type_name)
            self.addNamesDeclared(name)
            self.addNamesUsed(name)
def format(self, force):
"""Return formatted output."""
lst = "".join([x.format(force) for x in self.__members])
ret = ("struct %s{%s}" % (self.__type_name.format(force), lst, self.__name.format(force)))
if self.__name:
ret += self.__name.format(force)
if self.__size:
ret += "[%s]" % (self.__size.format(force))
return ret + ";"
def getMembers(self):
"""Accessor."""
return self.__members
def getMemberAccesses(self):
"""Accessor."""
return self.__member_accesses
def getName(self):
"""Accessor."""
return self.__name
def getTypeName(self):
"""Accessor."""
return self.__type_name
def setMemberAccesses(self, lst):
"""Set collected member accesses."""
self.__member_accesses = lst
def __str__(self):
"""String representation."""
return "Struct(%i)" % (len(self.__content))
########################################
# Functions ############################
########################################
def glsl_parse_struct(source):
"""Parse struct block."""
(type_name, scope, content) = extract_tokens(source, ("struct", "?n", "?{"))
if not type_name:
return (None, source)
# Get potential name and size.
(name, size, remaining) = extract_tokens(content, ("?n", "[", "?i", "]", ";"))
if not name:
size = None
(name, remaining) = extract_tokens(content, ("?n", ";"))
if not name:
name = None
(terminator, remaining) = extract_tokens(content, ("?;",))
if not terminator:
return (None, source)
# Parse members
members = glsl_parse_member_list(scope)
if not members:
raise RuntimeError("empty member list for struct")
return (GlslBlockStruct(type_name, members, name, size), remaining)
def is_glsl_block_struct(op):
"""Tell if given object is GlslBlockInout."""
return isinstance(op, GlslBlockStruct)
| 30.944444 | 98 | 0.560144 | from shrinky.glsl_block import GlslBlock
from shrinky.glsl_block import extract_tokens
from shrinky.glsl_block_member import glsl_parse_member_list
########################################
# GlslBlockStruct ######################
########################################
class GlslBlockStruct(GlslBlock):
"""Struct declaration."""
def __init__(self, type_name, members, name=None, size=0):
"""Constructor."""
GlslBlock.__init__(self)
self.__type_name = type_name
self.__members = members
self.__name = name
self.__size = size
self.__member_accesses = []
# Hierarchy.
        if name is not None:  # glsl_parse_struct may pass name=None for anonymous structs
            name.setType(type_name)
            self.addNamesDeclared(name)
            self.addNamesUsed(name)
def format(self, force):
"""Return formatted output."""
lst = "".join([x.format(force) for x in self.__members])
ret = ("struct %s{%s}" % (self.__type_name.format(force), lst, self.__name.format(force)))
if self.__name:
ret += self.__name.format(force)
if self.__size:
ret += "[%s]" % (self.__size.format(force))
return ret + ";"
def getMembers(self):
"""Accessor."""
return self.__members
def getMemberAccesses(self):
"""Accessor."""
return self.__member_accesses
def getName(self):
"""Accessor."""
return self.__name
def getTypeName(self):
"""Accessor."""
return self.__type_name
def setMemberAccesses(self, lst):
"""Set collected member accesses."""
self.__member_accesses = lst
def __str__(self):
"""String representation."""
return "Struct(%i)" % (len(self.__content))
########################################
# Functions ############################
########################################
def glsl_parse_struct(source):
"""Parse struct block."""
(type_name, scope, content) = extract_tokens(source, ("struct", "?n", "?{"))
if not type_name:
return (None, source)
# Get potential name and size.
(name, size, remaining) = extract_tokens(content, ("?n", "[", "?i", "]", ";"))
if not name:
size = None
(name, remaining) = extract_tokens(content, ("?n", ";"))
if not name:
name = None
(terminator, remaining) = extract_tokens(content, ("?;",))
if not terminator:
return (None, source)
# Parse members
members = glsl_parse_member_list(scope)
if not members:
raise RuntimeError("empty member list for struct")
return (GlslBlockStruct(type_name, members, name, size), remaining)
def is_glsl_block_struct(op):
"""Tell if given object is GlslBlockInout."""
return isinstance(op, GlslBlockStruct)
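# --- Hedged usage sketch (editor's addition): parsing one struct declaration
# from a token stream; the token format is assumed from extract_tokens usage.
# (block, remaining) = glsl_parse_struct(tokens)
# if is_glsl_block_struct(block):
#     print(block.getTypeName(), block.getMembers())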
| 0 | 0 | 0 |
1a50f81d932843336b3e69b5e740b3960191e4df | 638 | py | Python | wtiproj04/zad_6_mean_user.py | kwitnacy/wti | 9ee659bf2912f5b2fe6229bbda5074d4f3ebb3d4 | [
"Apache-2.0"
] | null | null | null | wtiproj04/zad_6_mean_user.py | kwitnacy/wti | 9ee659bf2912f5b2fe6229bbda5074d4f3ebb3d4 | [
"Apache-2.0"
] | null | null | null | wtiproj04/zad_6_mean_user.py | kwitnacy/wti | 9ee659bf2912f5b2fe6229bbda5074d4f3ebb3d4 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
from typing import List
user_ID = 78
query = 'userID == ' + str(user_ID)
# head = my_join()
head = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'IMAX', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Short', 'Thriller', 'War', 'Western']
joined = pd.read_csv('/home/kwitnoncy/Documents/politechnika/wti/wtiproj03/data/joined.dat', sep='\t')
joined = joined.query(query).to_numpy()
print([np.nanmean([(row[2] * row[genre + 9]) if row[genre + 9] != 0 else np.nan for row in joined]) for genre in range(len(head))])
| 39.875 | 222 | 0.661442 | import pandas as pd
import numpy as np
from typing import List
user_ID = 78
query = 'userID == ' + str(user_ID)
# head = my_join()
head = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'IMAX', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Short', 'Thriller', 'War', 'Western']
joined = pd.read_csv('/home/kwitnoncy/Documents/politechnika/wti/wtiproj03/data/joined.dat', sep='\t')
joined = joined.query(query).to_numpy()
print([np.nanmean([(row[2] * row[genre + 9]) if row[genre + 9] != 0 else np.nan for row in joined]) for genre in range(len(head))])
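# --- Editor's note (addition): for each genre column the comprehension averages
# rating * flag over rows where that genre flag is nonzero; the column offsets
# (rating at index 2, genre flags from index 9) are assumptions about joined.dat.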
| 0 | 0 | 0 |
7a5e99161a963ae3dbd40cd45ad6fc98263f2ffb | 1,698 | py | Python | uPython/clock/main.py | c3dprk/split-flap | 0b4a8bc4e540f38ad24d0b790ed6268a0b832a2f | [
"MIT"
] | 13 | 2017-12-11T12:25:49.000Z | 2022-03-02T11:44:09.000Z | uPython/clock/main.py | c3dprk/split-flap | 0b4a8bc4e540f38ad24d0b790ed6268a0b832a2f | [
"MIT"
] | null | null | null | uPython/clock/main.py | c3dprk/split-flap | 0b4a8bc4e540f38ad24d0b790ed6268a0b832a2f | [
"MIT"
] | 6 | 2018-01-01T16:15:48.000Z | 2020-04-07T23:44:03.000Z | from machine import Pin, RTC
from time import sleep, sleep_ms, sleep_us
from json import load as jload
from ntptime import settime
ADDR_DELAY_US = 100
GOTO_DELAY_MS = 20
HOUR_LUT = jload(open("hour_lut.json"))
MINUTE_LUT = jload(open("minute_lut.json"))
TIME_ZONE = +2
encoder_pins = [Pin(i, Pin.IN, Pin.PULL_UP) for i in [0, 2, 4, 5, 12, 13]]
run = Pin(16, Pin.OUT)
run.off()
adc_hour = Pin(14, Pin.OUT)
adc_hour.off()
adc_minute = Pin(15, Pin.OUT)
adc_minute.off()
try:
settime()
except:
pass
rtc = RTC()
while True:
update_time()
sleep(5)
| 19.744186 | 74 | 0.59894 | from machine import Pin, RTC
from time import sleep, sleep_ms, sleep_us
from json import load as jload
from ntptime import settime
ADDR_DELAY_US = 100
GOTO_DELAY_MS = 20
HOUR_LUT = jload(open("hour_lut.json"))
MINUTE_LUT = jload(open("minute_lut.json"))
TIME_ZONE = +2
encoder_pins = [Pin(i, Pin.IN, Pin.PULL_UP) for i in [0, 2, 4, 5, 12, 13]]
run = Pin(16, Pin.OUT)
run.off()
adc_hour = Pin(14, Pin.OUT)
adc_hour.off()
adc_minute = Pin(15, Pin.OUT)
adc_minute.off()
try:
    # Initial NTP sync; failures are tolerated (e.g. network not up yet).
    settime()
except:
    pass
rtc = RTC()
def select(adc, adl):
    # Assert whichever address-select lines were given (either may be None).
    if adc is not None:
        adc.on()
    if adl is not None:
        adl.on()
def deselect(adc, adl):
    if adc is not None:
        adc.off()
    if adl is not None:
        adl.off()
def start(adc, adl):
select(adc, adl)
run.on()
sleep_us(ADDR_DELAY_US)
deselect(adc, adl)
def stop(adc, adl):
select(adc, adl)
run.off()
sleep_us(ADDR_DELAY_US)
deselect(adc, adl)
def get_pos(adc, adl):
select(adc, adl)
sleep_us(ADDR_DELAY_US)
retval = 0
for pin in encoder_pins:
retval <<= 1
retval += pin.value()
deselect(adc, adl)
return retval
def go_to(adc, adl, val):
while get_pos(adc, adl) != val:
start(adc, adl)
sleep_ms(GOTO_DELAY_MS)
stop(adc, adl)
def update_time():
hour, minute = rtc.datetime()[4:6]
hour = (hour+int(TIME_ZONE))%24
if 0 == minute:
try:
settime()
except:
pass
go_to(adc_hour, None, HOUR_LUT[hour])
go_to(adc_minute, None, MINUTE_LUT[minute])
def test_62(adc, adl):
for i in range(62, 0, -1):
go_to(adc, adl, i)
print(i)
while True:
update_time()
sleep(5)
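# Host-side sanity check of the encoder read logic in get_pos() (plain Python
# sketch, no MicroPython needed; FakePin is a purely illustrative stand-in for
# machine.Pin):
#
# class FakePin:
#     def __init__(self, v):
#         self._v = v
#     def value(self):
#         return self._v
#
# val = 0
# for p in [FakePin(b) for b in (1, 0, 1, 1, 0, 1)]:
#     val = (val << 1) | p.value()  # MSB-first shift, as in get_pos()
# assert val == 0b101101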
| 951 | 0 | 184 |
5cfc29885daabd72bd7e5d74a2904b6dbfab5b1f | 866 | py | Python | conftest.py | MatPoliquin/retro | c70c174a9818d1e97bc36e61abb4694d28fc68e1 | ["MIT-0", "MIT"] | 2,706 | 2018-04-05T18:28:50.000Z | 2022-03-29T16:56:59.000Z | conftest.py | MatPoliquin/retro | c70c174a9818d1e97bc36e61abb4694d28fc68e1 | ["MIT-0", "MIT"] | 242 | 2018-04-05T22:30:42.000Z | 2022-03-19T01:55:11.000Z | conftest.py | MatPoliquin/retro | c70c174a9818d1e97bc36e61abb4694d28fc68e1 | ["MIT-0", "MIT"] | 464 | 2018-04-05T19:10:34.000Z | 2022-03-28T13:33:32.000Z |
| 32.074074 | 129 | 0.572748 |
import pytest
import retro.data
inttypes = {
'exp': retro.data.Integrations.EXPERIMENTAL_ONLY,
'contrib': retro.data.Integrations.CONTRIB_ONLY,
}
def pytest_collection_modifyitems(items):
    # NB: this inner helper appears unused by the hook below; kept as-is.
    def test(*args, **kwargs):
        print(kwargs)
        return False
    for item in items:
        if item.originalname in ('test_load', 'test_rom', 'test_state', 'test_hash'):
            for key in item.keywords.keys():
                # Only the parametrization key present in the node id matters.
                if '[' + key + ']' not in item.nodeid:
                    continue
                # Keys look like '<Game>_<Platform>[_<integration>]'.
                game = key.split('_')
                gamename = '%s-%s' % (game[0], game[1])
                try:
                    retro.data.get_romfile_path(gamename, inttypes[game[2]] if len(game) > 2 else retro.data.Integrations.STABLE)
                except (FileNotFoundError, KeyError):
                    # Skip tests whose ROM is not installed locally.
                    item.add_marker(pytest.mark.skip)
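# With this hook in place, games whose ROMs are missing are skipped at
# collection time; the skip summary can be listed with e.g. `pytest -rs`
# (invocation is illustrative; the exact test layout depends on the repo).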
| 687 | 0 | 23 |
67146385598397f73fdfde407de56e453e1380c1 | 266 | py | Python | examples/futures/market/funding_rate.py | AlfonsoAgAr/binance-futures-connector-python | f0bd2c7b0576503bf526ce6be329ca2dae90fefe | ["MIT"] | 58 | 2021-11-22T11:46:27.000Z | 2022-03-30T06:58:53.000Z | examples/futures/market/funding_rate.py | sanjeevan121/binance-futures-connector-python | d820b73a15e9f64c80891a13694ca0c5d1693b90 | ["MIT"] | 15 | 2021-12-15T22:40:52.000Z | 2022-03-29T22:08:31.000Z | examples/futures/market/funding_rate.py | sanjeevan121/binance-futures-connector-python | d820b73a15e9f64c80891a13694ca0c5d1693b90 | ["MIT"] | 28 | 2021-12-10T03:56:13.000Z | 2022-03-25T22:23:44.000Z |
#!/usr/bin/env python
from binance.futures import Futures as Client
import logging
from binance.lib.utils import config_logging
config_logging(logging, logging.DEBUG)
futures_client = Client()
logging.info(futures_client.funding_rate("BTCUSDT", limit=100))
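# The endpoint returns a list of funding-rate records; a consumption sketch
# (field names follow Binance's public futures API docs and should be treated
# as assumptions here):
#
# rates = futures_client.funding_rate("BTCUSDT", limit=100)
# for r in rates:
#     print(r["symbol"], r["fundingTime"], r["fundingRate"])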
| 22.166667 | 68 | 0.793233 |
| 0 | 0 | 0 |
6a497cd03b5c018bcfd778acb4c7bc5cc9a8d147 | 7,904 | py | Python | perfkitbenchmarker/providers/aws/aws_capacity_reservation.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | ["Apache-2.0"] | 3 | 2018-04-28T13:06:14.000Z | 2020-06-09T02:39:44.000Z | perfkitbenchmarker/providers/aws/aws_capacity_reservation.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | ["Apache-2.0"] | 1 | 2021-09-09T07:43:25.000Z | 2021-09-09T10:47:56.000Z | perfkitbenchmarker/providers/aws/aws_capacity_reservation.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | ["Apache-2.0"] | 6 | 2019-06-11T18:59:57.000Z | 2021-03-02T19:14:42.000Z |
| 39.52 | 80 | 0.711285 |
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CapacityReservation for AWS virtual machines.
AWS EC2 has the concept of capacity reservations which allow the
user to request a reservation for a given number of VMs of a
specified shape (machine type and os type) in a given zone, for
an optionally-supplied duration. This module implements this functionality.
A useful feature of using AwsCapacityReservation is that it allows the
user to specify a region instead of a zone, and this module will automatically
pick a zone that has capacity, and the VM(s) will then be launched in that zone.
AwsCapacityReservation modifies all the VMs in a given vm_group in the
following way:
1. The capacity_reservation_id attribute on the VM is set after the
reservation is created. The VM needs to reference this id during
creation.
2. If the user supplied a region instead of zone, then this module
will update the zone attribute on the VM, as well as the zone
attribute on the VM's network instance.
A run of PKB may have several capacity reservations; there is a 1:1 mapping
from AWS vm_groups to AwsCapacityReservation instances. This is because all
VMs in a VM group share the same shape and zone.
"""
import datetime
import json
import logging
from absl import flags
from perfkitbenchmarker import capacity_reservation
from perfkitbenchmarker import errors
from perfkitbenchmarker import os_types
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
_INSUFFICIENT_CAPACITY = 'InsufficientInstanceCapacity'
class InvalidVmGroupSizeError(Exception):
pass
class UnsupportedOsTypeError(Exception):
pass
class CreationError(Exception):
pass
class AwsCapacityReservation(capacity_reservation.BaseCapacityReservation):
"""An object representing an AWS EC2 CapacityReservation."""
CLOUD = aws.CLOUD
def __init__(self, vm_group):
if not vm_group:
raise InvalidVmGroupSizeError(
'AwsCapacityReservation must be initialized with at least one '
'VM in the vm_group.')
super(AwsCapacityReservation, self).__init__(vm_group)
self.zone_or_region = vm_group[0].zone
self.region = util.GetRegionFromZone(self.zone_or_region)
self.machine_type = vm_group[0].machine_type
self.os_type = vm_group[0].OS_TYPE
self.vm_count = len(vm_group)
  def _Create(self):
    """Creates the AWS CapacityReservation.
A reservation will be created given the VM shape in self.vm_groups.
Count is determined by the number of VMs in said group. The reservation
will have a lifetime determined by the general PKB concept of
timeout_minutes. If the reservation exceeds this timeout, AWS will
cancel it automatically. The VMs in the reservation will not be deleted.
    Note that an empty capacity reservation will incur costs for the
VM shape / count, even if no VMs are using it.
After the reservation is created, this method updates all the VMs
in self.vm_groups by setting the capacity_reservation_id, as well
as the zone attributes on the VM, and the VM's network instance.
Raises:
UnsupportedOsTypeError: If creating a capacity reservation for the
given os type is not supported.
CreationError: If a capacity reservation cannot be created in the
region (typically indicates a stockout).
"""
if self.os_type in os_types.LINUX_OS_TYPES:
instance_platform = 'Linux/UNIX'
elif self.os_type in os_types.WINDOWS_OS_TYPES:
instance_platform = 'Windows'
else:
raise UnsupportedOsTypeError(
'Unsupported os_type for AWS CapacityReservation: %s.'
% self.os_type)
# If the user did not specify an AZ, we need to try to create the
    # CapacityReservation in a specific AZ until it succeeds.
# Then update the zone attribute on all the VMs in the group,
# as well as the zone attribute on the VMs' network instance.
if util.IsRegion(self.zone_or_region):
zones_to_try = util.GetZonesInRegion(self.region)
else:
zones_to_try = [self.zone_or_region]
end_date = (
datetime.datetime.utcnow() +
datetime.timedelta(minutes=FLAGS.timeout_minutes))
for zone in zones_to_try:
cmd = util.AWS_PREFIX + [
'ec2',
'create-capacity-reservation',
'--instance-type=%s' % self.machine_type,
'--instance-platform=%s' % instance_platform,
'--availability-zone=%s' % zone,
'--instance-count=%s' % self.vm_count,
'--instance-match-criteria=targeted',
'--region=%s' % self.region,
'--end-date-type=limited',
'--end-date=%s' % end_date.isoformat(),
]
stdout, stderr, retcode = vm_util.IssueCommand(cmd,
raise_on_failure=False)
if retcode:
logging.info('Unable to create CapacityReservation in %s. '
'This may be retried. Details: %s', zone, stderr)
if _INSUFFICIENT_CAPACITY in stderr:
logging.error(util.STOCKOUT_MESSAGE)
raise errors.Benchmarks.InsufficientCapacityCloudFailure(
util.STOCKOUT_MESSAGE + ' CapacityReservation in ' + zone)
continue
json_output = json.loads(stdout)
self.capacity_reservation_id = (
json_output['CapacityReservation']['CapacityReservationId'])
self._UpdateVmsInGroup(self.capacity_reservation_id, zone)
return
raise CreationError('Unable to create CapacityReservation in any of the '
'following zones: %s.' % zones_to_try)
def _Delete(self):
"""Deletes the capacity reservation."""
cmd = util.AWS_PREFIX + [
'ec2',
'cancel-capacity-reservation',
'--capacity-reservation-id=%s' % self.capacity_reservation_id,
'--region=%s' % self.region,
]
vm_util.IssueCommand(cmd, raise_on_failure=False)
def _Exists(self):
"""Returns true if the underlying reservation exists and is active."""
cmd = util.AWS_PREFIX + [
'ec2',
'describe-capacity-reservations',
'--capacity-reservation-id=%s' % self.capacity_reservation_id,
'--region=%s' % self.region,
]
stdout, _, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
return False
json_output = json.loads(stdout)
return json_output['CapacityReservations'][0]['State'] == 'active'
def _UpdateVmsInGroup(self, capacity_reservation_id, zone):
"""Updates the VMs in a group with necessary reservation details.
AWS virtual machines need to reference the capacity reservation id
during creation, so it is set on all VMs in the group. Additionally,
this class may determine which zone to run in, so that needs to be
updated too (on the VM, and the VM's network instance).
Args:
capacity_reservation_id: ID of the reservation created by this instance.
zone: Zone chosen by this class, or if it was supplied, the zone
provided by the user. In the latter case, setting the zone is equivalent
to a no-op.
"""
for vm in self.vm_group:
vm.capacity_reservation_id = capacity_reservation_id
vm.zone = zone
vm.network.zone = zone
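# The zone scan in _Create() is a general "try each zone until one succeeds"
# pattern; a standalone sketch of the same idea (names are illustrative and
# not part of PerfKitBenchmarker):
#
# def create_in_first_available(zones, create_fn):
#   """Return create_fn(zone) for the first zone that does not raise."""
#   failures = []
#   for zone in zones:
#     try:
#       return create_fn(zone)
#     except Exception as e:
#       failures.append((zone, e))
#   raise RuntimeError('all zones failed: %s' % failures)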
| 460 | 70 | 94 |
cb4016d8c942b829044e83083af5b694d65e6752 | 519 | py | Python | jieba-0.38/jieba/analyse/__init__.py | scmsqhn/changhongmall | 41ce7a42e6484be0e4d4d3912b9e23b96a281d4a | ["MIT"] | 2 | 2016-02-13T08:36:45.000Z | 2017-05-07T22:43:34.000Z | jieba-0.38/jieba/analyse/__init__.py | scmsqhn/changhongmall | 41ce7a42e6484be0e4d4d3912b9e23b96a281d4a | ["MIT"] | null | null | null | jieba-0.38/jieba/analyse/__init__.py | scmsqhn/changhongmall | 41ce7a42e6484be0e4d4d3912b9e23b96a281d4a | ["MIT"] | null | null | null |
| 27.315789 | 53 | 0.797688 |
from __future__ import absolute_import
from .tfidf import TFIDF
from .textrank import TextRank
try:
from .analyzer import ChineseAnalyzer
except ImportError:
pass
default_tfidf = TFIDF()
default_textrank = TextRank()
extract_tags = tfidf = default_tfidf.extract_tags
set_idf_path = default_tfidf.set_idf_path
textrank = default_textrank.extract_tags
def set_stop_words(stop_words_path):
default_tfidf.set_stop_words(stop_words_path)
default_textrank.set_stop_words(stop_words_path)
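# Typical use of the module-level helpers exposed above (sketch; the input
# text and file name are illustrative):
#
# import jieba.analyse
# jieba.analyse.set_stop_words('stop_words.txt')
# tags = jieba.analyse.extract_tags(text, topK=5)
# ranked = jieba.analyse.textrank(text, topK=5)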
| 120 | 0 | 25 |
267f7a0745f4981f02d8619bb346037688232eb9 | 12,792 | py | Python | elastica/rod/factory_function.py | bhosale2/PyElastica | 520374672cbd6b0c89a912c5019559e66c5535e3 | ["MIT"] | null | null | null | elastica/rod/factory_function.py | bhosale2/PyElastica | 520374672cbd6b0c89a912c5019559e66c5535e3 | ["MIT"] | null | null | null | elastica/rod/factory_function.py | bhosale2/PyElastica | 520374672cbd6b0c89a912c5019559e66c5535e3 | ["MIT"] | null | null | null |
| 34.021277 | 123 | 0.636413 |
__doc__ = """ Factory function to allocate variables for Cosserat Rod"""
__all__ = ["allocate"]
import typing
from typing import Optional, Tuple
import warnings
import logging
import numpy as np
from numpy.testing import assert_allclose
from elastica.utils import MaxDimension, Tolerance
from elastica._linalg import _batch_cross, _batch_norm, _batch_dot
def allocate(
n_elements,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus: float,
nu_for_torques: Optional[float] = None,
shear_modulus: Optional[float] = None,
position: Optional[np.ndarray] = None,
directors: Optional[np.ndarray] = None,
rest_sigma: Optional[np.ndarray] = None,
rest_kappa: Optional[np.ndarray] = None,
*args,
**kwargs,
):
log = logging.getLogger()
if "poisson_ratio" in kwargs:
# Deprecation warning for poission_ratio
raise NameError(
"Poisson's ratio is deprecated for Cosserat Rod for clarity. Please provide shear_modulus instead."
)
# sanity checks here
assert n_elements > 1
assert base_length > Tolerance.atol()
assert np.sqrt(np.dot(normal, normal)) > Tolerance.atol()
assert np.sqrt(np.dot(direction, direction)) > Tolerance.atol()
# check if position is given.
if position is None: # Generate straight and uniform rod
# Set the position array
position = np.zeros((MaxDimension.value(), n_elements + 1))
end = start + direction * base_length
for i in range(0, 3):
position[i, ...] = np.linspace(start[i], end[i], n_elements + 1)
_position_validity_checker(position, start, n_elements)
# Compute rest lengths and tangents
position_diff = position[..., 1:] - position[..., :-1]
rest_lengths = _batch_norm(position_diff)
tangents = position_diff / rest_lengths
normal /= np.linalg.norm(normal)
if directors is None: # Generate straight uniform rod
# Set the directors matrix
directors = np.zeros((MaxDimension.value(), MaxDimension.value(), n_elements))
# Construct directors using tangents and normal
normal_collection = np.repeat(normal[:, np.newaxis], n_elements, axis=1)
# Check if rod normal and rod tangent are perpendicular to each other otherwise
# directors will be wrong!!
assert_allclose(
_batch_dot(normal_collection, tangents),
0,
atol=Tolerance.atol(),
err_msg=(" Rod normal and tangent are not perpendicular to each other!"),
)
directors[0, ...] = normal_collection
directors[1, ...] = _batch_cross(tangents, normal_collection)
directors[2, ...] = tangents
_directors_validity_checker(directors, tangents, n_elements)
# Set radius array
radius = np.zeros((n_elements))
# Check if the user input radius is valid
radius_temp = np.array(base_radius)
_assert_dim(radius_temp, 2, "radius")
radius[:] = radius_temp
# Check if the elements of radius are greater than tolerance
assert np.all(radius > Tolerance.atol()), " Radius has to be greater than 0."
# Set density array
density_array = np.zeros((n_elements))
# Check if the user input density is valid
density_temp = np.array(density)
_assert_dim(density_temp, 2, "density")
density_array[:] = density_temp
# Check if the elements of density are greater than tolerance
assert np.all(
density_array > Tolerance.atol()
), " Density has to be greater than 0."
# Second moment of inertia
A0 = np.pi * radius * radius
I0_1 = A0 * A0 / (4.0 * np.pi)
I0_2 = I0_1
I0_3 = 2.0 * I0_2
I0 = np.array([I0_1, I0_2, I0_3]).transpose()
# Mass second moment of inertia for disk cross-section
mass_second_moment_of_inertia = np.zeros(
(MaxDimension.value(), MaxDimension.value(), n_elements), np.float64
)
mass_second_moment_of_inertia_temp = np.einsum(
"ij,i->ij", I0, density * rest_lengths
)
for i in range(n_elements):
np.fill_diagonal(
mass_second_moment_of_inertia[..., i],
mass_second_moment_of_inertia_temp[i, :],
)
# sanity check of mass second moment of inertia
if (mass_second_moment_of_inertia < Tolerance.atol()).all():
message = "Mass moment of inertia matrix smaller than tolerance, please check provided radius, density and length."
log.warning(message)
# Inverse of second moment of inertia
inv_mass_second_moment_of_inertia = np.zeros(
(MaxDimension.value(), MaxDimension.value(), n_elements)
)
for i in range(n_elements):
# Check rank of mass moment of inertia matrix to see if it is invertible
assert (
np.linalg.matrix_rank(mass_second_moment_of_inertia[..., i])
== MaxDimension.value()
)
inv_mass_second_moment_of_inertia[..., i] = np.linalg.inv(
mass_second_moment_of_inertia[..., i]
)
# Shear/Stretch matrix
if not shear_modulus:
log.info(
"""Shear modulus is not explicitly given.\n
In such case, we compute shear_modulus assuming poisson's ratio of 0.5"""
)
shear_modulus = youngs_modulus / (2.0 * (1.0 + 0.5))
# Value taken based on best correlation for Poisson ratio = 0.5, from
# "On Timoshenko's correction for shear in vibrating beams" by Kaneko, 1975
alpha_c = 0.964
shear_matrix = np.zeros(
(MaxDimension.value(), MaxDimension.value(), n_elements), np.float64
)
for i in range(n_elements):
np.fill_diagonal(
shear_matrix[..., i],
[
alpha_c * shear_modulus * A0[i],
alpha_c * shear_modulus * A0[i],
youngs_modulus * A0[i],
],
)
# Bend/Twist matrix
bend_matrix = np.zeros(
(MaxDimension.value(), MaxDimension.value(), n_elements), np.float64
)
for i in range(n_elements):
np.fill_diagonal(
bend_matrix[..., i],
[
youngs_modulus * I0_1[i],
youngs_modulus * I0_2[i],
shear_modulus * I0_3[i],
],
)
for i in range(0, MaxDimension.value()):
assert np.all(
bend_matrix[i, i, :] > Tolerance.atol()
), " Bend matrix has to be greater than 0."
# Compute bend matrix in Voronoi Domain
bend_matrix = (
bend_matrix[..., 1:] * rest_lengths[1:]
+ bend_matrix[..., :-1] * rest_lengths[0:-1]
) / (rest_lengths[1:] + rest_lengths[:-1])
# Compute volume of elements
volume = np.pi * radius ** 2 * rest_lengths
# Compute mass of elements
mass = np.zeros(n_elements + 1)
mass[:-1] += 0.5 * density * volume
mass[1:] += 0.5 * density * volume
# Set dissipation constant or nu array
dissipation_constant_for_forces = np.zeros((n_elements))
# Check if the user input nu is valid
nu_temp = np.array(nu)
_assert_dim(nu_temp, 2, "dissipation constant (nu) for forces)")
dissipation_constant_for_forces[:] = nu
# Check if the elements of dissipation constant greater than tolerance
assert np.all(
dissipation_constant_for_forces >= 0.0
), " Dissipation constant(nu) has to be equal or greater than 0."
# Custom nu for torques
if nu_for_torques is None:
dissipation_constant_for_torques = dissipation_constant_for_forces.copy()
else:
dissipation_constant_for_torques = np.asarray(nu_for_torques)
_assert_dim(
dissipation_constant_for_torques, 2, "dissipation constant (nu) for torque)"
)
# Generate rest sigma and rest kappa, use user input if defined
# set rest strains and curvature to be zero at start
# if found in kwargs modify (say for curved rod)
if rest_sigma is None:
rest_sigma = np.zeros((MaxDimension.value(), n_elements))
_assert_shape(rest_sigma, (MaxDimension.value(), n_elements), "rest_sigma")
if rest_kappa is None:
rest_kappa = np.zeros((MaxDimension.value(), n_elements - 1))
_assert_shape(rest_kappa, (MaxDimension.value(), n_elements - 1), "rest_kappa")
# Compute rest voronoi length
rest_voronoi_lengths = 0.5 * (rest_lengths[1:] + rest_lengths[:-1])
# Allocate arrays for Cosserat Rod equations
velocities = np.zeros((MaxDimension.value(), n_elements + 1))
omegas = np.zeros((MaxDimension.value(), n_elements))
accelerations = 0.0 * velocities
angular_accelerations = 0.0 * omegas
# _vector_states = np.hstack(
# (position, velocities, omegas, accelerations, angular_accelerations)
# )
# _matrix_states = directors.copy()
internal_forces = 0.0 * accelerations
internal_torques = 0.0 * angular_accelerations
external_forces = 0.0 * accelerations
external_torques = 0.0 * angular_accelerations
lengths = np.zeros((n_elements))
tangents = np.zeros((3, n_elements))
dilatation = np.zeros((n_elements))
voronoi_dilatation = np.zeros((n_elements - 1))
dilatation_rate = np.zeros((n_elements))
sigma = np.zeros((3, n_elements))
kappa = np.zeros((3, n_elements - 1))
internal_stress = np.zeros((3, n_elements))
internal_couple = np.zeros((3, n_elements - 1))
damping_forces = np.zeros((3, n_elements + 1))
damping_torques = np.zeros((3, n_elements))
return (
n_elements,
position,
velocities,
omegas,
accelerations,
angular_accelerations,
directors,
radius,
mass_second_moment_of_inertia,
inv_mass_second_moment_of_inertia,
shear_matrix,
bend_matrix,
density_array,
volume,
mass,
dissipation_constant_for_forces,
dissipation_constant_for_torques,
internal_forces,
internal_torques,
external_forces,
external_torques,
lengths,
rest_lengths,
tangents,
dilatation,
dilatation_rate,
voronoi_dilatation,
rest_voronoi_lengths,
sigma,
kappa,
rest_sigma,
rest_kappa,
internal_stress,
internal_couple,
damping_forces,
damping_torques,
)
def _assert_dim(vector, max_dim: int, name: str):
assert vector.ndim < max_dim, (
f"Input {name} dimension is not correct {vector.shape}"
+ f" It should be maximum {max_dim}D vector or single floating number."
)
def _assert_shape(array: np.ndarray, expected_shape: Tuple[int], name: str):
assert array.shape == expected_shape, (
f"Given {name} shape is not correct, it should be "
+ str(expected_shape)
+ " but instead "
+ str(array.shape)
)
def _position_validity_checker(position, start, n_elements):
"""Checker on user-defined position validity"""
_assert_shape(position, (MaxDimension.value(), n_elements + 1), "position")
# Check if the start position of the rod and first entry of position array are the same
assert_allclose(
position[..., 0],
start,
atol=Tolerance.atol(),
err_msg=str(
"First entry of position" + " (" + str(position[..., 0]) + " ) "
" is different than start " + " (" + str(start) + " ) "
),
)
def _directors_validity_checker(directors, tangents, n_elements):
"""Checker on user-defined directors validity"""
_assert_shape(
directors, (MaxDimension.value(), MaxDimension.value(), n_elements), "directors"
)
# Check if d1, d2, d3 are unit vectors
d1 = directors[0, ...]
d2 = directors[1, ...]
d3 = directors[2, ...]
assert_allclose(
_batch_norm(d1),
np.ones((n_elements)),
atol=Tolerance.atol(),
err_msg=(" d1 vector of input director matrix is not unit vector "),
)
assert_allclose(
_batch_norm(d2),
np.ones((n_elements)),
atol=Tolerance.atol(),
err_msg=(" d2 vector of input director matrix is not unit vector "),
)
assert_allclose(
_batch_norm(d3),
np.ones((n_elements)),
atol=Tolerance.atol(),
err_msg=(" d3 vector of input director matrix is not unit vector "),
)
# Check if d3xd1 = d2
assert_allclose(
_batch_cross(d3, d1),
d2,
atol=Tolerance.atol(),
err_msg=(" d3 x d1 != d2 of input director matrix"),
)
# Check if computed tangents from position is the same with d3
assert_allclose(
tangents,
d3,
atol=Tolerance.atol(),
err_msg=" Tangent vector computed using node positions is different than d3 vector of input directors",
)
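# Sketch of calling allocate() directly; in PyElastica it is normally reached
# through the CosseratRod factory methods, and the numbers below are purely
# illustrative:
#
# outputs = allocate(
#     n_elements=20,
#     start=np.zeros(3),
#     direction=np.array([0.0, 0.0, 1.0]),
#     normal=np.array([0.0, 1.0, 0.0]),
#     base_length=1.0,
#     base_radius=0.01,
#     density=1000.0,
#     nu=0.1,
#     youngs_modulus=1e6,
#     shear_modulus=1e6 / 3.0,  # matches the Poisson-ratio-0.5 default above
# )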
| 10,412 | 0 | 69 |
edebf6391ea5ad0a5b7782e9f778bed9cb045eb3 | 356 | py | Python | rl_vs_rand.py | yangmuzhi/wuziqi | 7bdee51ef2a37373b0823b00c4536138560ec3bb | ["MIT"] | null | null | null | rl_vs_rand.py | yangmuzhi/wuziqi | 7bdee51ef2a37373b0823b00c4536138560ec3bb | ["MIT"] | null | null | null | rl_vs_rand.py | yangmuzhi/wuziqi | 7bdee51ef2a37373b0823b00c4536138560ec3bb | ["MIT"] | null | null | null |
from env_wrapper import wzq_env
from policy.rand_policy import rand_agent
from policy.algo.ppo import PPO
from policy.net.net_v0 import policy_net
from policy.net.net_v0 import value_net
from policy.rl_policy import ppo_agent
import time
import numpy as np
agent0 = ppo_agent(size=15)
agent1 = rand_agent(size=15)
env = wzq_env([agent0, agent1])
env.run()
| 27.384615 | 41 | 0.814607 |
| 0 | 0 | 0 |
7f879e3d16877aed4273aa22fc10b7ba833b6e0c | 2,494 | py | Python | catalog/bindings/gmd/md_application_schema_information_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | ["Apache-2.0"] | null | null | null | catalog/bindings/gmd/md_application_schema_information_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | ["Apache-2.0"] | null | null | null | catalog/bindings/gmd/md_application_schema_information_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | ["Apache-2.0"] | null | null | null |
| 31.974359 | 84 | 0.607859 |
from dataclasses import dataclass, field
from typing import Optional
from bindings.gmd.abstract_object_type import AbstractObjectType
from bindings.gmd.binary_property_type import BinaryPropertyType
from bindings.gmd.character_string_property_type import CharacterStringPropertyType
from bindings.gmd.ci_citation_type import CiCitationPropertyType
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
class MdApplicationSchemaInformationType(AbstractObjectType):
"""
Information about the application schema used to build the dataset.
"""
class Meta:
name = "MD_ApplicationSchemaInformation_Type"
name: Optional[CiCitationPropertyType] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
"required": True,
},
)
schema_language: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"name": "schemaLanguage",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
"required": True,
},
)
constraint_language: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"name": "constraintLanguage",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
"required": True,
},
)
schema_ascii: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"name": "schemaAscii",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
graphics_file: Optional[BinaryPropertyType] = field(
default=None,
metadata={
"name": "graphicsFile",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
software_development_file: Optional[BinaryPropertyType] = field(
default=None,
metadata={
"name": "softwareDevelopmentFile",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
software_development_file_format: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"name": "softwareDevelopmentFileFormat",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
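# These generated bindings are normally populated by an XML parser; a usage
# sketch assuming the bindings were produced by xsdata (the parser import is
# an assumption):
#
# from xsdata.formats.dataclass.parsers import XmlParser
# obj = XmlParser().from_string(xml_text, MdApplicationSchemaInformationType)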
| 0 | 44 | 27 |
ff3169fa89df03f86c76a1e4195a8f9a48e7d51f | 5,733 | py | Python | semibandits/plotting_script.py | akshaykr/oracle_cb | 68f10fce5eca8ebe3f57fd5a56a0ef8d82537ab4 | ["MIT"] | 26 | 2017-08-02T19:58:06.000Z | 2021-11-03T06:31:01.000Z | semibandits/plotting_script.py | akshaykr/oracle_cb | 68f10fce5eca8ebe3f57fd5a56a0ef8d82537ab4 | ["MIT"] | 1 | 2020-03-03T06:06:32.000Z | 2020-03-03T06:06:32.000Z | semibandits/plotting_script.py | akshaykr/oracle_cb | 68f10fce5eca8ebe3f57fd5a56a0ef8d82537ab4 | ["MIT"] | 10 | 2017-06-02T19:34:38.000Z | 2022-03-22T10:38:51.000Z |
import pickle
import matplotlib.pyplot as plt
import matplotlib.patches
import matplotlib as mpl
import numpy as np
import sys, argparse
sys.path.append("../")
import Plotting
Names = {
'mini_gb2': 'VC-GB2',
'mini_gb5': 'VC-GB5',
'mini_lin': 'VC-Lin',
'epsall_gb2': '$\epsilon$-GB2',
'epsall_gb5': '$\epsilon$-GB5',
'epsall_lin': '$\epsilon$-Lin',
'lin': 'LinUCB'
}
Styles = {
'mini_gb2': ['k', 'solid'],
'mini_gb5': ['r', 'solid'],
'mini_lin': ['g', 'solid'],
'epsall_gb2': ['k', 'dashed'],
'epsall_gb5': ['r', 'dashed'],
'epsall_lin': ['g', 'dashed'],
'lin': ['b', 'solid']
}
parser = argparse.ArgumentParser()
parser.add_argument('--save', dest='save', action='store_true')
Args = parser.parse_args(sys.argv[1:])
D1 = Plotting.read_dir("../results/mslr30k_T=36000_L=3_e=0.1/")
D2 = Plotting.read_dir("../results/yahoo_T=40000_L=2_e=0.5/")
print(mpl.rcParams['figure.figsize'])
fig = plt.figure(figsize=(mpl.rcParams['figure.figsize'][0]*2, mpl.rcParams['figure.figsize'][1]-1))
ax = fig.add_subplot(111,frameon=False)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
std = True
legendHandles = []
keys = ['epsall_lin', 'mini_lin', 'epsall_gb2', 'mini_gb2', 'epsall_gb5', 'mini_gb5', 'lin']
for k in keys:
params = []
mus = []
stds = []
for (k1,v1) in D1[0].items():
if k1.find(k) == 0 and len(D1[0][k1]) != 0:
x = np.arange(100, 10*len(D1[0][k1][0])+1, 100)
mus.append(np.mean(D1[0][k1],axis=0)[9::10]/x)
stds.append(2/np.sqrt(len(D1[0][k1]))*(np.std(D1[0][k1],axis=0)[9::10]/x))
params.append(k1.split("_")[-1])
if len(mus) == 0:
continue
A = np.vstack(mus)
B = np.vstack(stds)
ids = np.argmax(A, axis=0)
mu = np.array([A[ids[i], i] for i in range(len(ids))])
stdev = np.array([B[ids[i], i] for i in range(len(ids))])
if k == 'mini_gb5':
mu = np.mean(D1[0]['mini_gb5_0.008'], axis=0)[9::10]/x
stdev = 2/np.sqrt(len(D1[0]['mini_gb5_0.008']))*(np.std(D1[0]['mini_gb5_0.008'], axis=0)[9::10]/x)
l1 = ax1.plot(x,mu,rasterized=True, linewidth=2.0, label=Names[k], color=Styles[k][0], linestyle=Styles[k][1])
legendHandles.append((matplotlib.patches.Patch(color=l1[0].get_color(), label=Names[k]), Names[k]))
if std and k=='mini_gb5' or k=='lin':
ax1.fill_between(x,
mu - stdev,
mu + stdev,
color = l1[0].get_color(), alpha=0.2, rasterized = True)
for k in keys:
params = []
mus = []
stds = []
for (k1,v1) in D2[0].items():
if k1.find(k) == 0 and len(D2[0][k1]) != 0:
x = np.arange(100, 10*len(D2[0][k1][0])+1, 100)
mus.append(np.mean(D2[0][k1],axis=0)[9::10]/x)
stds.append(2/np.sqrt(len(D2[0][k1]))*(np.std(D2[0][k1],axis=0)[9::10]/x))
params.append(k1.split("_")[-1])
if len(mus) == 0:
continue
A = np.vstack(mus)
B = np.vstack(stds)
ids = np.argmax(A, axis=0)
mu = np.array([A[ids[i], i] for i in range(len(ids))])
stdev = np.array([B[ids[i], i] for i in range(len(ids))])
if k == 'mini_gb5':
mu = np.mean(D2[0]['mini_gb5_0.008'], axis=0)[9::10]/x
stdev = 2/np.sqrt(len(D2[0]['mini_gb5_0.008']))*(np.std(D2[0]['mini_gb5_0.008'], axis=0)[9::10]/x)
l1 = ax2.plot(x,mu,rasterized=True, linewidth=2.0, label=Names[k], color=Styles[k][0], linestyle=Styles[k][1])
if std and k=='mini_gb5' or k=='lin':
ax2.fill_between(x,
mu - stdev,
mu + stdev,
color = l1[0].get_color(), alpha=0.2, rasterized = True)
plt.rc('font', size=18)
plt.rcParams['text.usetex'] = True
plt.rc('font', family='sans-serif')
## Ax1 is MSLR
ticks=ax1.get_yticks()
print(ticks)
ax1.set_ylim(2.15, 2.35)
print("Setting ylim to %0.2f, %0.2f" % (ticks[3], ticks[len(ticks)-2]))
ticks = ax1.get_yticks()
print(ticks)
ticks = ["", "", "2.2", "", "2.3", ""]
ax1.set_yticklabels(ticks,size=20)
ticks = ['', '', '10000', '', '20000', '', '30000']
ax1.set_xlim(1000, 31000)
ax1.set_xticklabels(ticks,size=20)
# Ax2 is Yahoo!
ticks=ax2.get_yticks()
print(ticks)
ax2.set_ylim(2.90,3.12)
print("Setting ylim to %0.2f, %0.2f" % (ticks[3], 3.15))
ticks=ax2.get_yticks()
print(ticks)
ticks = ["", "2.9", "", "3.0", "", "3.1"]
ax2.set_yticklabels(ticks,size=20)
ticks = ['', '', '10000', '', '20000', '', '30000']
ax2.set_xlim(1000, 32000)
ax2.set_xticklabels(ticks,size=20)
plt.sca(ax)
plt.ylabel('Average reward')
plt.xlabel('Number of interactions (T)')
leg = ax2.legend([x[1] for x in legendHandles], loc='upper center', bbox_to_anchor=(-0.1, -0.15), fancybox=False, shadow=False, ncol=7, frameon=False,fontsize=18)
for legobj in leg.legendHandles:
legobj.set_linewidth(4.0)
plt.sca(ax1)
tt1 = plt.title('Dataset: MSLR',fontsize=18)
tt1.set_position([0.5, 1.02])
plt.sca(ax2)
tt2 = plt.title('Dataset: Yahoo!',fontsize=18)
tt2.set_position([0.5, 1.02])
plt.gcf().subplots_adjust(bottom=0.25)
if Args.save:
plt.savefig("../figs/plots_grouped.png", format='png', dpi=100, bbox_inches='tight')
plt.savefig("../figs/plots_grouped.pdf", format='pdf', dpi=100, bbox_inches='tight')
else:
plt.show()
## (DONE) No band
## (DONE) markers + update legend
## (DONE) No legend frame
## (DONE) font is too big
## space between title and plot
## space between ylabel and yticks
## Get P-values (paired ttest and regular ttest)
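# Sketch for the p-value TODO above, assuming `a` and `b` are matched arrays
# of final average rewards for two algorithms over the same runs:
#
# from scipy.stats import ttest_rel, ttest_ind
# t_p, p_paired = ttest_rel(a, b)    # paired t-test
# t_u, p_unpaired = ttest_ind(a, b)  # regular (unpaired) t-test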
| 33.331395 | 162 | 0.593581 |
| 0 | 0 | 0 |
010b5e0043bc5876ab1a0b032ec3c37d63f3d2ba | 3,697 | py | Python | pacmill/filtering/mothur_uchime.py | xapple/pacmill | 1fde2316968251eaaf72618fd3d104ba524d6d12 | ["MIT"] | null | null | null | pacmill/filtering/mothur_uchime.py | xapple/pacmill | 1fde2316968251eaaf72618fd3d104ba524d6d12 | ["MIT"] | null | null | null | pacmill/filtering/mothur_uchime.py | xapple/pacmill | 1fde2316968251eaaf72618fd3d104ba524d6d12 | ["MIT"] | null | null | null |
| 34.231481 | 86 | 0.54693 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair.
MIT Licensed.
Contact at www.sinclair.bio
"""
# Built-in modules #
import os
# First party modules #
from fasta import FASTQ
from plumbing.check_cmd_found import check_cmd
from plumbing.cache import property_cached
from autopaths.tmp_path import new_temp_dir, new_temp_path
# Third party modules #
import sh
###############################################################################
class MothurUchime:
"""
Takes care of detecting and removing chimeric reads, by calling
`mothur.uchime` as seen in:
https://github.com/novigit/broCode/blob/master/pbamp/readCurationPipeline.sh#L103
mothur "#chimera.uchime(fasta=roi.$sample.rhq.trim.fwdrev.pol.fasta,
reference=self, chunks=16, abskew=1, chimealns=T)"
mothur "#remove.seqs(fasta=roi.$sample.rhq.trim.fwdrev.pol.fasta,
accnos=roi.$sample.rhq.trim.fwdrev.pol.denovo.uchime.accnos)"
In case of missing output see:
https://forum.mothur.org/t/chimera-uchime-does-not-produce-any-output/20621
"""
def __repr__(self):
msg = '<%s object on "%s">'
return msg % (self.__class__.__name__, self.source.path)
def __init__(self, source, dest=None):
# Source is the FASTA or FASTQ file #
self.source = FASTQ(source)
# Destination is a FASTQ file that contains the results #
if dest is None:
dest = self.source.prefix_path + '.chimeras.fastq'
self.dest = FASTQ(dest)
#------------------------------ Running ----------------------------------#
def __call__(self, verbose=True):
# Message #
if verbose: print("Running chimeras detection on '%s'" % self.source)
# Check it is installed #
check_cmd('mothur', True)
# Check version #
assert "1.42.1" in sh.mothur('--version')
# Make new temporary directory #
tmp_dir = new_temp_dir()
source = tmp_dir + 'reads.fasta'
# If the input is a FASTQ we need to make a FASTA first #
if self.source.endswith('fastq'):
FASTQ(self.source).to_fasta(source)
else:
self.source.copy_to(source)
# Make the long command as a string #
command = "#chimera.uchime(" \
"fasta=%s," \
"reference=self," \
"chunks=16," \
"chimealns=T," \
"abskew=1)"
# Mothur pollutes with so much files, have to cwd #
current_dir = os.getcwd()
os.chdir(tmp_dir)
# Run the command on the input FASTA file #
sh.mothur(command % source)
# Restore current directory #
os.chdir(current_dir)
# Move files #
pass #TODO
# Return #
return self.dest
#------------------------------- Results ---------------------------------#
def __bool__(self):
"""
Return True if the chimeras software was run already and the results are
stored on the filesystem. Return False if it was not yet run.
"""
return self.dest.exists
@property_cached
def results(self):
# Check it was run #
if not self:
msg = "You can't access results from chimeras " \
"before running the tool."
raise Exception(msg)
# Return the results #
return ChimerasResults(self.dest)
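# Minimal usage sketch (the input path is illustrative):
#
#   chim = MothurUchime('sample.fastq')
#   chim(verbose=True)       # runs mothur's chimera.uchime on a FASTA copy
#   fastq = chim.results     # a ChimerasResults FASTQ wrapper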
###############################################################################
class ChimerasResults(FASTQ):
"""A file with the results from chimeras."""
    pass
| 1,871 | 0 | 106 |
098723ab33dce0dbfff39bd8321132e24734ccf0 | 6,980 | py | Python | olypy/oio.py | akhand2222/olypy | d2b62a590c416a7d919e6e7bfcabf35873bc13c4 | ["Apache-2.0"] | null | null | null | olypy/oio.py | akhand2222/olypy | d2b62a590c416a7d919e6e7bfcabf35873bc13c4 | ["Apache-2.0"] | null | null | null | olypy/oio.py | akhand2222/olypy | d2b62a590c416a7d919e6e7bfcabf35873bc13c4 | ["Apache-2.0"] | null | null | null |
| 30.21645 | 96 | 0.559312 |
'''
Read and write Olympia state files.
'''
import os
import os.path
import sys
from contextlib import redirect_stdout
from .oid import to_oid
from .formatters import print_one_thing, read_oly_file
def fixup_ms(data):
'''
For whatever reason, the value in IM/ms needs to have a trailing space
'''
for box in data:
if 'IM' in data[box]:
if 'ms' in data[box]['IM']:
value = data[box]['IM']['ms']
value[0] = value[0].strip() + ' '
data[box]['IM']['ms'] = value
def write_oly_file(data, kind=False, verbose=False):
'''
The main function that drives outputting a file
'''
fixup_ms(data)
order = sorted([int(box) for box in data.keys()])
count = 0
for box in order:
box = str(box)
if kind:
if ' '+kind+' ' not in data[box].get('firstline', '')[0]:
continue
print_one_thing(data[box])
del data[box]
count += 1
if verbose:
print('wrote', count, verbose, 'boxes.', file=sys.stderr)
def write_player(data, box, verbose=False):
player_box = box
boxlist = data[box].get('PL', {}).get('un', {})
print_one_thing(data[box])
del data[box]
count = 0
for box in boxlist:
print_one_thing(data[box])
del data[box]
count += 1
if verbose:
print('wrote', count, 'characters for player', to_oid(int(player_box)), file=sys.stderr)
def read_players(dir, verbose=False):
'''
    read every file in dir whose name is an integer
'''
ret = {}
files = os.listdir(dir)
for name in files:
if name.isdigit():
data = read_oly_file(os.path.join(dir, name), verbose='player ' + name)
ret.update(data)
return ret
def write_players(data, dir, verbose=False):
boxlist = list(data.keys()) # we're deleting as we go
for box in boxlist:
if data.get(box) is None:
continue
if ' player ' in data[box]['firstline'][0]:
fact = os.path.join(dir, 'fact')
if not os.path.isdir(fact):
os.mkdir(fact)
filename = os.path.join(dir, 'fact', box)
with open(filename, 'w') as f:
with redirect_stdout(f):
write_player(data, box, verbose=verbose)
def write_system_file(data):
fr = None
lt = 1
tr = None
ur = None
hr = None
hp = None
nr = None
nl = None
cr = None
for k, v in data.items():
fl = v['firstline'][0]
try:
na = v['na'][0]
except KeyError:
na = ''
if ' player pl_regular' in fl:
lt = max(lt, int(v['PL']['lt'][0]))
if fr is None and ' loc region' in fl and na == 'Faery':
if v.get('LI', {}).get('hl'):
fr = k
else:
fr = 0
if tr is None and ' loc region' in fl and na == 'Undercity':
if data[k].get('LI', {}).get('hl'):
tr = k
else:
tr = 0
if ur is None and ' loc region' in fl and na == 'Subworld':
if data[k].get('LI', {}).get('hl'):
ur = k
else:
ur = 0
if hr is None and ' loc region' in fl and na == 'Hades':
if data[k].get('LI', {}).get('hl'):
hr = k
else:
hr = 0
if hp is None and fl.endswith(' loc pit'): # normal pits are 'pits'
hp = k
if nr is None and ' loc region' in fl and na == 'Nowhere':
nr = k
nl = v['LI']['hl'][0]
if cr is None and ' loc region' in fl and na == 'Cloudlands':
if data[k].get('LI', {}).get('hl'):
cr = k
else:
cr = 0
if hp is None:
# not surprising for a player sim
        # if I wanted to do this right I'd have to also create City of the Dead in a province.
# fake it.
hp = hr
days_per_month = 30
days_since_epoch = lt * days_per_month
system = '''sysclock: {} {} {}
indep_player=100
gm_player=200
skill_player=202
from_host=foo@example.com
reply_host=foo@example.com
game_title=SIMULATION
post=1
init=1
fr={}
tr={}
ur={}
fp=204
hr={}
hp={}
hl=205
nr={}
nl={}
np=206
cr={}
cp=210
'''.format(lt, days_per_month, days_since_epoch, fr, tr, ur, hr, hp, nr, nl, cr)
if 'None' in system:
raise ValueError('failed to find some stuff for system:\n' + system)
print(system)
def read_lib(libdir):
if not os.path.isdir(libdir):
raise ValueError('libdir {} is not a directory'.format(libdir))
data = read_oly_file(os.path.join(libdir, 'loc'), verbose='loc')
data.update(read_oly_file(os.path.join(libdir, 'item'), verbose='item'))
data.update(read_oly_file(os.path.join(libdir, 'skill'), verbose='skill'))
data.update(read_oly_file(os.path.join(libdir, 'gate'), verbose='gate'))
data.update(read_oly_file(os.path.join(libdir, 'road'), verbose='road'))
data.update(read_oly_file(os.path.join(libdir, 'ship'), verbose='ship'))
data.update(read_oly_file(os.path.join(libdir, 'unform'), verbose='unform'))
data.update(read_oly_file(os.path.join(libdir, 'misc'), verbose='misc'))
data.update(read_players(os.path.join(libdir, 'fact'), verbose=True))
return data
def write_lib(data, libdir):
if os.path.exists(libdir):
if not os.path.isdir(libdir):
raise ValueError('libdir {} is not a directory'.format(libdir))
else:
os.mkdir(libdir)
with open(os.path.join(libdir, 'system'), 'w') as f:
with redirect_stdout(f):
write_system_file(data)
with open(os.path.join(libdir, 'loc'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='loc', verbose='loc')
with open(os.path.join(libdir, 'item'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='item', verbose='item')
with open(os.path.join(libdir, 'skill'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='skill', verbose='skill')
with open(os.path.join(libdir, 'gate'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='gate', verbose='gate')
with open(os.path.join(libdir, 'road'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='road', verbose='road')
with open(os.path.join(libdir, 'ship'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='ship', verbose='ship')
with open(os.path.join(libdir, 'unform'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='unform', verbose='unform')
write_players(data, libdir, verbose=True)
with open(os.path.join(libdir, 'misc'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, verbose='misc') # catchall
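# Hedged usage sketch (not part of the original module): round-trip an Olympia
# library directory through read_lib/write_lib; the directory names are
# illustrative only.
if __name__ == '__main__':
    data = read_lib('lib')        # parse loc/item/skill/... plus player factions
    write_lib(data, 'lib-copy')   # write the same boxes back out to a new libdir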
# --- flat/announce/migrations/0002_auto_20150916_0830.py (bilbeyt/ITURO-Giant_Flat, MIT) ---
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('announce', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='announce',
old_name='content',
new_name='content_en',
),
migrations.AddField(
model_name='announce',
name='content_tr',
field=models.CharField(default=1, max_length=100),
preserve_default=False,
),
]
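# Hedged usage note (not part of the original file): this migration is applied
# with the usual Django command, e.g.
#     python manage.py migrate announce 0002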
# --- CondTools/L1Trigger/python/L1ConfigTSCPayloads_cff.py (ckamtsikis/cmssw, Apache-2.0) ---
from L1TriggerConfig.CSCTFConfigProducers.CSCTFConfigOnline_cfi import *
#from L1TriggerConfig.CSCTFConfigProducers.CSCTFAlignmentOnline_cfi import *
from L1TriggerConfig.CSCTFConfigProducers.L1MuCSCPtLutConfigOnline_cfi import *
from L1TriggerConfig.DTTrackFinder.L1MuDTEtaPatternLutOnline_cfi import *
from L1TriggerConfig.DTTrackFinder.L1MuDTExtLutOnline_cfi import *
from L1TriggerConfig.DTTrackFinder.L1MuDTPhiLutOnline_cfi import *
from L1TriggerConfig.DTTrackFinder.L1MuDTPtaLutOnline_cfi import *
from L1TriggerConfig.DTTrackFinder.L1MuDTQualPatternLutOnline_cfi import *
from L1TriggerConfig.DTTrackFinder.L1MuDTTFParametersOnline_cfi import *
from L1TriggerConfig.RPCTriggerConfig.L1RPCConfigOnline_cfi import *
from L1TriggerConfig.RPCTriggerConfig.L1RPCConeDefinitionOnline_cfi import *
from L1TriggerConfig.RPCTriggerConfig.L1RPCBxOrConfigOnline_cfi import *
from L1TriggerConfig.RPCTriggerConfig.L1RPCHsbConfigOnline_cfi import *
from L1TriggerConfig.GMTConfigProducers.L1MuGMTParametersOnlineProducer_cfi import *
from L1TriggerConfig.L1ScalesProducers.L1MuTriggerPtScaleOnlineProducer_cfi import *
from L1TriggerConfig.L1ScalesProducers.L1MuTriggerScalesOnlineProducer_cfi import *
L1MuGMTParametersOnlineProducer.ignoreVersionMismatch = True
from L1TriggerConfig.RCTConfigProducers.L1RCTParametersOnline_cfi import *
from L1TriggerConfig.L1ScalesProducers.L1EmEtScaleConfigOnline_cfi import *
from L1TriggerConfig.L1ScalesProducers.L1CaloEcalScaleConfigOnline_cfi import *
from L1TriggerConfig.L1ScalesProducers.L1CaloHcalScaleConfigOnline_cfi import *
from L1TriggerConfig.GctConfigProducers.L1GctJetFinderParamsOnline_cfi import *
from L1TriggerConfig.L1ScalesProducers.L1HtMissScaleOnline_cfi import *
from L1TriggerConfig.L1ScalesProducers.L1HfRingEtScaleOnline_cfi import *
from L1TriggerConfig.L1ScalesProducers.L1JetEtScaleOnline_cfi import *
from L1TriggerConfig.L1GtConfigProducers.l1GtParametersOnline_cfi import *
from L1TriggerConfig.L1GtConfigProducers.l1GtPsbSetupOnline_cfi import *
from L1TriggerConfig.L1GtConfigProducers.l1GtTriggerMenuOnline_cfi import *
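# Hedged usage sketch (not part of the original file): a _cff fragment like this
# is normally pulled into a job configuration via Process.load; the process name
# is illustrative only.
#     import FWCore.ParameterSet.Config as cms
#     process = cms.Process('L1')
#     process.load('CondTools.L1Trigger.L1ConfigTSCPayloads_cff')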
# --- powerdnsadmin/models/domain_template_record.py (warf/PowerDNS-Admin, MIT) ---
import logging

from .base import db
class DomainTemplateRecord(db.Model):
__tablename__ = "domain_template_record"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
type = db.Column(db.String(64))
ttl = db.Column(db.Integer)
data = db.Column(db.Text)
comment = db.Column(db.Text)
status = db.Column(db.Boolean)
template_id = db.Column(db.Integer, db.ForeignKey('domain_template.id'))
template = db.relationship('DomainTemplate', back_populates='records')
def __repr__(self):
return '<DomainTemplateRecord {0}>'.format(self.id)
def __init__(self,
id=None,
name=None,
type=None,
ttl=None,
data=None,
comment=None,
status=None):
self.id = id
self.name = name
self.type = type
self.ttl = ttl
self.data = data
self.comment = comment
self.status = status
def apply(self):
try:
db.session.commit()
except Exception as e:
logging.error(
'Can not update domain template table. Error: {0}'.format(e))
db.session.rollback()
return {
'status': 'error',
'msg': 'Can not update domain template table'
}
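# Hedged usage sketch (not part of the original file): attach a new record to an
# existing DomainTemplate and persist it; the field values are illustrative only.
def _example_add_record(template):
    record = DomainTemplateRecord(name='www', type='A', ttl=3600,
                                  data='192.0.2.1', status=True)
    record.template = template
    db.session.add(record)
    return record.apply()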
# --- test/math/TestUniverse.py (pulsar-chem/BPModule, BSD-3-Clause) ---
import pulsar as psr
def run_test():
tester = psr.PyTester("Testing Universe Python Interface")
#Constructors and assignment
U0, U1=psr.DoubleUniverse(),psr.DoubleUniverse()
tester.test_equal("Default constructor",U0,U1)
U2=psr.DoubleUniverse([1.0,2.0,3.0])
U3=psr.DoubleUniverse([1.0,2.0,3.0])
tester.test_equal("Variadic initializer",U2,U3);
U4=psr.DoubleUniverse(U3);
tester.test_equal("Copy constructor",U3,U4);
U0=U2;
tester.test_equal("Assignment",U0,U3);
#Basic properties
tester.test_return("Size",True,3,U0.size)
tester.test_return("Count",True,True,U0.count,1.0)
tester.test_return("Get index",True,2,U3.idx,3.0)
tester.test_call("Get non-existant index",False,U3.idx,5.0)
tester.test_return("Get hash U0",True,U0.my_hash(),U3.my_hash)
tester.test_return("Get hash U3",True,U0.my_hash(),U4.my_hash)
#Element access/modification
tester.test_return("Subscript operator",True,3.0,U0.__getitem__,2)
tester.test_call("Subscript operator (out of range)",False,U0.__getitem__,9)
tester.test_return("at function",True,3.0,U0.at,2)
tester.test_call("at function (out of range)",False,U0.at,9)
U0.insert(4.0)
tester.test_return("insert elements",True,U0,U3.insert,4.0)
#U0=U3=1-4; U1=empty; U2=U4=1-3
#Set operations
U5=psr.DoubleUniverse([4.0,5.0,6.0])
U8=psr.DoubleUniverse([1.0,2.0,3.0,4.0,5.0,6.0])
tester.test_return("union assign",True,U8,U0.union_assign,U5)
tester.test_return("union",True,U8,U3.set_union,U5)
U9=psr.DoubleUniverse([1.0,2.0])
U10=psr.DoubleUniverse([1.0,2.0,15.0,16.0])
tester.test_return("intersection assign",True,U9,U0.intersection_assign,U10)
print(U10)
tester.test_return("intersection",True,U9,U3.intersection,U10)
U11=psr.DoubleUniverse([3.0,4.0,5.0,6.0])
tester.test_return("difference",True,U11,U8.difference,U9)
tester.test_return("difference assign",True,U11,U8.difference_assign,U9)
#Comparison operators
tester.test_return("not equal",True,True,U9.__ne__,U11)
tester.test_return("superset equal",True,True,U11.is_superset_of,U8)
tester.test_return("superset true",True,True,U10.is_superset_of,U9)
tester.test_return("superset false",True,False,U9.is_superset_of,U10)
tester.test_return("proper superset equal",True,False,U11.is_proper_superset_of,U8)
tester.test_return("proper supserset true",True,True,U10.is_proper_superset_of,U9)
tester.test_return("proper superset false",True,False,U9.is_proper_superset_of,U10)
tester.test_return("subset equal",True,True,U11.is_subset_of,U8)
tester.test_return("subset true",True,True,U9.is_subset_of,U10)
tester.test_return("subset false",True,False,U10.is_subset_of,U9)
tester.test_return("proper subset equal",True,False,U11.is_proper_subset_of,U8)
tester.test_return("proper subset true",True,True,U9.is_proper_subset_of,U10)
tester.test_return("proper subset false",True,False,U10.is_proper_subset_of,U9)
tester.print_results()
return tester.nfailed()
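# Hedged runner sketch (not part of the original file): execute the suite
# directly and surface the failure count as the process exit code.
if __name__ == '__main__':
    import sys
    sys.exit(run_test())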
# --- orchard/system_status/__init__.py (BMeu/Orchard, MIT) ---
# -*- coding: utf-8 -*-
"""
This package includes the Flask blueprint and all related functionality for displaying system
information.
"""
from .blueprint import blueprint
from .status_item import StatusItem
from .status_group import StatusGroup
import orchard.system_status.views # NOQA
__all__ = ['blueprint', 'StatusGroup', 'StatusItem']
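# Hedged usage sketch (not part of the original package): registering the
# blueprint on a Flask app; the URL prefix is illustrative only.
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(blueprint, url_prefix='/status')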
# --- PP4E/Examples/PP4E/Integrate/Embed/prioredition-2x/Inventory/validate1.py (BeacherHou/Python-_Markdown-, MIT) ---
# embedded validation code, run from C
# input vars: PRODUCT, QUANTITY, BUYER
# output vars: ERRORS, WARNINGS
import string # all python tools are available to embedded code
import inventory # plus C extensions, Python modules, classes,..
msgs, errs = [], [] # warning, error message lists
def validate_order():
if PRODUCT not in inventory.skus(): # this function could be imported
errs.append('bad-product') # from a user-defined module too
elif QUANTITY > inventory.stock(PRODUCT):
errs.append('check-quantity')
else:
inventory.reduce(PRODUCT, QUANTITY)
if inventory.stock(PRODUCT) / QUANTITY < 2:
msgs.append('reorder-soon:' + `PRODUCT`)
first, last = BUYER[0], BUYER[1:] # code is changeable on-site:
if first not in string.uppercase: # this file is run as one long
errs.append('buyer-name:' + first) # code-string, with input and
if BUYER not in inventory.buyers(): # output vars used by the C app
msgs.append('new-buyer-added')
inventory.add_buyer(BUYER)
validate_order()
ERRORS = ' '.join(errs) # add a space between messages
WARNINGS = ' '.join(msgs) # pass out as strings: "" == none
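# Hedged sketch (not part of the original file) of the variables the embedding
# C program is expected to inject before running this code string; the sample
# values are illustrative only.
#     PRODUCT  = 'sku-001'
#     QUANTITY = 2
#     BUYER    = 'Ann'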
# --- examples/auracog_suggestions/sessions/sessions.py (Telefonica/clipspy) ---
from typing import Any, Dict, Iterable, List, Text
from ..common import FeatureSet
class SessionFeatureSet(FeatureSet):
"""
This class represents a session's set of features.
"""
def __init__(self, features: Dict[Text, Any], aura_id_name="AURA_ID", aura_id_global_name="AURA_ID_GLOBAL",
session_id_name="SESSION_ID"):
"""
:param features: dictionary of features.
:param aura_id_name: the name of the aura_id feature.
:param aura_id_global_name: the name of the aura_id_global feature.
:param session_id_name: the name of the session_id feature.
"""
super().__init__(features)
self.aura_id_name = aura_id_name
self.aura_id_global_name = aura_id_global_name
self.session_id_name = session_id_name
@property
def aura_id(self):
return self.get_feature(self.aura_id_name)
@property
def aura_id_global(self):
return self.get_feature(self.aura_id_global_name)
@property
def session_id(self):
return self.get_feature(self.session_id_name)
@classmethod
    def build_from_row(cls, values: List[Any], names: List[Text], aura_id_name="AURA_ID",
                       aura_id_global_name="AURA_ID_GLOBAL", session_id_name="SESSION_ID"):
"""
        Build a SessionFeatureSet from the contents of a row (composed of a list of values and corresponding names).
:param values: list of feature values.
:param names: list of the corresponding names.
:param aura_id_name: the name of the aura_id feature.
:param aura_id_global_name: the name of the aura_id_global feature.
:param session_id_name: the name of the session_id feature.
"""
        return SessionFeatureSet(cls.build_features(values, names), aura_id_name=aura_id_name,
                                 aura_id_global_name=aura_id_global_name, session_id_name=session_id_name)
class SessionClusterModel(object):
"""
This class represents a clustering model for sessions.
"""
def predict(self, X: Iterable[SessionFeatureSet], **kwargs) -> List[int]:
"""
Predict the fittest cluster for a list of sessions.
:param X: List of sessions to cluster.
:return: List with the ids of the fittest clusters.
"""
raise NotImplementedError("{} is an abstract class and cannot be directly instantiated. "
"The method predict must be implemented!".format(self.__class__))
def fit(self, X: Iterable[SessionFeatureSet], **kwargs):
"""
:param X: List of sessions to cluster.
:param kwargs:
"""
raise NotImplementedError("{} is an abstract class and cannot be directly instantiated. "
"The method fit must be implemented!".format(self.__class__))
f4d42441b96321040a54f16e2b431bb5d1c4b18d | 3,334 | py | Python | include/server/bw/tools/__init__.py | spacebeam/bw | 8f975a2925f309b0038c876f1234595df9798c98 | [
"Apache-2.0"
] | 2 | 2019-10-30T04:26:21.000Z | 2019-10-31T17:26:59.000Z | include/server/bw/tools/__init__.py | spacebeam/bw | 8f975a2925f309b0038c876f1234595df9798c98 | [
"Apache-2.0"
] | 22 | 2019-08-21T17:13:45.000Z | 2020-08-06T00:38:56.000Z | include/server/bw/tools/__init__.py | spacebeam/bw | 8f975a2925f309b0038c876f1234595df9798c98 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of bw.
# Distributed under the terms of the last AGPL License.
__author__ = 'Jean Chassoul'
import arrow
import ujson as json
import logging
import uuid
from tornado import gen
def validate_uuid4(uuid_string):
'''
Validate that a UUID string is in
fact a valid uuid4.
Happily, the uuid module does the actual
checking for us.
'''
try:
val = uuid.UUID(uuid_string, version=4)
except ValueError:
# If it's a value error, then the string
# is not a valid hex code for a UUID.
return False
return str(val) == uuid_string
def get_average(total, marks):
'''
Get average from signals
'''
return float(total) / len(marks)
def get_percentage(part, whole):
'''
Get percentage of part and whole.
'''
return "{0:.0f}%".format(float(part)/whole * 100)
@gen.coroutine
def check_json(struct):
'''
Check for malformed JSON
'''
try:
message = json.loads(struct)
except Exception as error:
message = json.dumps({'error': 400})
raise error
return message
@gen.coroutine
def check_times(start, end):
'''
Check times
'''
try:
start = (arrow.get(start) if start
else arrow.get(arrow.utcnow().date()))
end = (arrow.get(end) if end else start.replace(days=+1))
message = {'start': start.timestamp, 'end': end.timestamp}
except Exception as error:
logging.exception(error)
raise error
return message
@gen.coroutine
def check_times_get_timestamp(start, end):
'''
Check times get timestamp
'''
try:
start = (arrow.get(start) if start
else arrow.get(arrow.utcnow().date()))
end = (arrow.get(end) if end else start.replace(days=+1))
message = {'start': start.timestamp, 'end': end.timestamp}
except Exception as error:
logging.exception(error)
raise error
return message
@gen.coroutine
def check_times_get_datetime(start, end):
'''
Check times get datetime
'''
try:
start = (arrow.get(start) if start
else arrow.get(arrow.utcnow().date()))
end = (arrow.get(end) if end else start.replace(days=+1))
message = {'start': start.naive, 'end': end.naive}
except Exception as error:
logging.exception(error)
raise error
return message
def clean_message(struct):
'''
clean message
'''
struct = struct.to_native()
struct = {
key: struct[key] for key in struct if struct[key] is not None
}
return struct
def clean_structure(struct):
'''
clean structure
'''
struct = struct.to_primitive()
struct = {
key: struct[key] for key in struct if struct[key] is not None
}
return struct
def clean_results(results):
'''
clean results
'''
results = results.to_primitive()
results = results.get('results')
results = [
{
key: dic[key] for key in dic if dic[key] is not None
} for dic in results
]
return {'results': results}
def str2bool(boo):
'''
String to boolean
'''
return boo.lower() in ('yes', 'true', 't', '1')
| 21.934211 | 69 | 0.588482 | # -*- coding: utf-8 -*-
# This file is part of bw.
# Distributed under the terms of the last AGPL License.
__author__ = 'Jean Chassoul'
import arrow
import ujson as json
import logging
import uuid
from tornado import gen
def validate_uuid4(uuid_string):
'''
Validate that a UUID string is in
fact a valid uuid4.
Happily, the uuid module does the actual
checking for us.
'''
try:
val = uuid.UUID(uuid_string, version=4)
except ValueError:
# If it's a value error, then the string
# is not a valid hex code for a UUID.
return False
return str(val) == uuid_string
def get_average(total, marks):
'''
Get average from signals
'''
return float(total) / len(marks)
def get_percentage(part, whole):
'''
Get percentage of part and whole.
'''
return "{0:.0f}%".format(float(part)/whole * 100)
@gen.coroutine
def check_json(struct):
'''
Check for malformed JSON
'''
try:
message = json.loads(struct)
except Exception as error:
message = json.dumps({'error': 400})
raise error
return message
@gen.coroutine
def check_times(start, end):
'''
Check times
'''
try:
start = (arrow.get(start) if start
else arrow.get(arrow.utcnow().date()))
end = (arrow.get(end) if end else start.replace(days=+1))
message = {'start': start.timestamp, 'end': end.timestamp}
except Exception as error:
logging.exception(error)
raise error
return message
@gen.coroutine
def check_times_get_timestamp(start, end):
'''
Check times get timestamp
'''
try:
start = (arrow.get(start) if start
else arrow.get(arrow.utcnow().date()))
end = (arrow.get(end) if end else start.replace(days=+1))
message = {'start': start.timestamp, 'end': end.timestamp}
except Exception as error:
logging.exception(error)
raise error
return message
@gen.coroutine
def check_times_get_datetime(start, end):
'''
Check times get datetime
'''
try:
start = (arrow.get(start) if start
else arrow.get(arrow.utcnow().date()))
end = (arrow.get(end) if end else start.replace(days=+1))
message = {'start': start.naive, 'end': end.naive}
except Exception as error:
logging.exception(error)
raise error
return message
def clean_message(struct):
'''
clean message
'''
struct = struct.to_native()
struct = {
key: struct[key] for key in struct if struct[key] is not None
}
return struct
def clean_structure(struct):
'''
clean structure
'''
struct = struct.to_primitive()
struct = {
key: struct[key] for key in struct if struct[key] is not None
}
return struct
def clean_results(results):
'''
clean results
'''
results = results.to_primitive()
results = results.get('results')
results = [
{
key: dic[key] for key in dic if dic[key] is not None
} for dic in results
]
return {'results': results}
def str2bool(boo):
'''
String to boolean
'''
return boo.lower() in ('yes', 'true', 't', '1')
| 0 | 0 | 0 |
# --- pyox/webhdfs.py (alexmilowski/webhdfs, Apache-2.0) ---
from pyox.client import Client, ServiceError
def absolute_path(path):
if len(path)>0 and path[0]!='/':
path = '/'+path
return path
class WebHDFS(Client):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.service = 'webhdfs'
self.read_chunk_size = 65536
def list_directory(self,path):
path = absolute_path(path)
url = '{}{}'.format(self.service_url(),path)
req = self.get(url,params={'op':'LISTSTATUS'},allow_redirects=False)
#print(req.url)
#req = requests.get(url,auth=None)
if req.status_code==200:
data = req.json()
result = {}
for entry in data['FileStatuses']['FileStatus']:
result[entry['pathSuffix']] = entry
return result
else:
raise ServiceError(req.status_code,'Cannot access path {}'.format(path),req)
def open(self,path,offset=None,length=None,buffersize=None):
path = absolute_path(path)
url = '{}{}?op=OPEN'.format(self.service_url(),path)
if offset is not None:
url += '&offset={}'.format(offset)
if length is not None:
url += '&length={}'.format(length)
if buffersize is not None:
url += '&buffersize={}'.format(buffersize)
#print(url)
#open_req = self.get(url)
#if open_req.status_code==200:
# return open_req.iter_content(chunk_size=self.read_chunk_size)
open_req = self.get(url,allow_redirects=False)
if open_req.status_code==307:
location = open_req.headers['Location'];
read_req = self.get(location,allow_redirects=False,stream=True)
if read_req.status_code==200:
return read_req.iter_content(chunk_size=self.read_chunk_size)
else:
raise ServiceError(read_req.status_code,'Cannot open datanode location {}'.format(location),read_req)
else:
raise ServiceError(open_req.status_code,'Cannot open path {}'.format(path),open_req)
def make_directory(self,path,permission=None):
path = absolute_path(path)
url = '{}{}?op=MKDIRS'.format(self.service_url(),path)
if permission is not None:
url += '&permission={}'.format(permission)
#print(url)
req = self.put(url)
if req.status_code!=200:
raise ServiceError(req.status_code,'Cannot create path {}'.format(path),req)
msg = req.json()
return msg['boolean']
def move(self,sourcepath,destpath):
sourcepath = absolute_path(sourcepath)
destpath = absolute_path(destpath)
url = '{}{}?op=RENAME&destination={}'.format(self.service_url(),sourcepath,destpath)
#print(url)
req = self.put(url)
if req.status_code!=200:
raise ServiceError(req.status_code,'Cannot move path {} to {}'.format(sourcepath,destpath),req)
msg = req.json()
return msg['boolean']
def remove(self,path,recursive=False):
path = absolute_path(path)
recursiveParam = 'true' if recursive else 'false'
url = '{}{}?op=DELETE&recursive={}'.format(self.service_url(),path,recursiveParam)
#print(url)
req = self.delete(url)
if req.status_code!=200:
raise ServiceError(req.status_code,'Cannot delete path {}'.format(path),req)
msg = req.json()
return msg['boolean']
def status(self,path):
url = '{}{}?op=GETFILESTATUS'.format(self.service_url(),absolute_path(path))
#print(url)
req = self.get(url)
if req.status_code!=200:
raise ServiceError(req.status_code,'Cannot status path {}'.format(path),req)
msg = req.json()
return msg['FileStatus']
def copy(self,data,path,size=-1,overwrite=False):
path = absolute_path(path)
overwriteParam = 'true' if overwrite else 'false'
url = '{}{}?op=CREATE&overwrite={}'.format(self.service_url(),path,overwriteParam)
#print(url)
headers = {}
headers['Content-Type'] = 'application/octet-stream'
if size >= 0:
headers['Content-Length'] = str(size)
open_req = self.put(
url,
allow_redirects=False,
headers={'Content-Length' : '0'})
if open_req.status_code==307:
location = open_req.headers['Location'];
#print(location)
req = self.put(
location,
data=data,
headers=headers)
if req.status_code!=201:
raise ServiceError(req.status_code,'Cannot copy to path {}'.format(path),req)
else:
raise ServiceError(req.status_code,'Cannot open path {}'.format(path),open_req)
return True
def append(self,data,path,size=-1,buffersize=None):
path = absolute_path(path)
url = '{}{}?op=APPEND&overwrite={}'.format(self.service_url(),path)
if buffersize is not None:
url += '&buffersize={}'.format(buffersize)
#print(url)
open_req = self.post(
url,
allow_redirects=False,
headers={'Content-Length' : '0'})
if open_req.status_code==307:
headers = {}
headers['Content-Type'] = 'application/octet-stream'
if size >= 0:
headers['Content-Length'] = str(size)
location = open_req.headers['Location'];
#print(location)
req = self.post(
location,
data=data,
headers=headers)
if req.status_code!=200:
raise ServiceError(req.status_code,'Cannot append to path {}'.format(path),req)
else:
raise ServiceError(req.status_code,'Cannot append path {}'.format(path),open_req)
return True
# --- utils/poweroff_restart.py (ndkjing/usv, MIT) ---
# restart the machine
| 11.090909 | 31 | 0.655738 | from os import system
# restart the machine
def restart():
system('sudo reboot')
def poweroff():
system('sudo shutdown now')
| 45 | 0 | 45 |
33b53b231a99d94ff5cc752b86bbc0159b2e1fb3 | 17,472 | py | Python | pyctrl/flask/server.py | ComplexArts/pyctrl-core | a72bd53924410c2e7f1e71c8188a0391550febdd | [
"Apache-2.0"
] | 12 | 2017-06-20T13:20:40.000Z | 2021-01-18T00:12:10.000Z | pyctrl/flask/server.py | mcdeoliveira/beaglebone | 6c6062c6d1e9902178500abcd10be6ac0bcf043d | [
"Apache-2.0"
] | 2 | 2017-06-12T15:17:24.000Z | 2018-01-30T18:22:19.000Z | pyctrl/flask/server.py | mcdeoliveira/beaglebone | 6c6062c6d1e9902178500abcd10be6ac0bcf043d | [
"Apache-2.0"
] | 4 | 2017-09-25T12:19:19.000Z | 2019-01-31T21:46:24.000Z | from flask import Flask, request, render_template, jsonify, make_response, redirect, flash, url_for
from functools import wraps
import re
import pyctrl
from pyctrl.block import Logger
import warnings
import importlib
import traceback, sys, io
from pyctrl.flask import JSONEncoder, JSONDecoder
encoder = JSONEncoder(sort_keys = True, indent = 4)
decoder = JSONDecoder()
# decorators
# decode
# decode_kwargs_aux
# decode_kwargs
# json_response
# Server class
if __name__ == "__main__":
try:
import os
os.environ['RCPY_NO_HANDLERS'] = 't'
from pyctrl.rc import Controller
debug = False
RCPY = True
except:
from pyctrl.timer import Controller
debug = True
RCPY = False
try:
app = Server(__name__)
app.config['SECRET_KEY'] = 'secret!'
# initialize controller
app.set_controller(controller = Controller(period = .01))
# run app
app.run(host='0.0.0.0',
debug = debug)
except:
pass
finally:
sys.exit(0)
from flask import Flask, request, render_template, jsonify, make_response, redirect, flash, url_for
from functools import wraps
import re
import pyctrl
from pyctrl.block import Logger
import warnings
import importlib
import traceback, sys, io
from pyctrl.flask import JSONEncoder, JSONDecoder
encoder = JSONEncoder(sort_keys = True, indent = 4)
decoder = JSONDecoder()
# decorators
# decode
def decode_value(f):
@wraps(f)
def wrapper(label, value, *args, **kwargs):
return f(label, decoder.decode(value), *args, **kwargs)
return wrapper
# decode_kwargs_aux
def decode_kwargs_aux(e):
if len(e) == 1:
return decoder.decode(e[0])
elif len(e) > 1:
return [decoder.decode(v) for v in e]
else:
return None
# decode_kwargs
def decode_kwargs(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
kwargs.update({k: decode_kwargs_aux(request.args.getlist(k))
for k in request.args.keys()})
except:
raise Exception("Arguments '{}' are not json compatible".format(request.args))
#print('>>> kwargs = {}'.format(kwargs))
return f(*args, **kwargs)
return wrapper
# json_response
def json_response(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
retval = f(*args, **kwargs)
if retval is None:
retval = { 'status': 'success' }
except Exception as e:
message = io.StringIO()
traceback.print_exc(file=message)
retval = { 'status': 'error',
'message': message.getvalue() }
next = request.args.get('next', None)
if next:
if 'status' in retval and retval['status'] == 'error':
flash(retval['message'])
return redirect(url_for(next))
else:
return jsonify(retval)
return wrapper
# Server class
class Server(Flask):
def __init__(self, *args, **kwargs):
self.controller = None
self.base_url = ''
# call super
super().__init__(*args, **kwargs)
# change json_encoder
self.json_encoder = JSONEncoder
# set api entry points
# index, info and scope
self.add_url_rule(self.base_url + '/',
view_func = self.index)
self.add_url_rule(self.base_url + '/info',
view_func = self.info)
self.add_url_rule(self.base_url + '/scope/<path:label>',
view_func = self.scope)
# download controller
self.add_url_rule(self.base_url + '/download',
view_func = self.download)
# upload controller
self.add_url_rule(self.base_url + '/upload',
methods=['GET', 'POST'],
view_func = self.upload)
# reset
self.add_url_rule(self.base_url + '/reset',
view_func = self.reset)
# set controller
self.add_url_rule(self.base_url + '/set/controller/<module>/<pyctrl_class>',
view_func = self.reset_controller)
# start and stop
self.add_url_rule(self.base_url + '/start',
view_func = self.start)
self.add_url_rule(self.base_url + '/stop',
view_func = self.stop)
# signals
self.add_url_rule(self.base_url + '/add/signal/<path:label>',
view_func = self.add_signal)
self.add_url_rule(self.base_url + '/remove/signal/<path:label>',
view_func = self.remove_signal)
self.add_url_rule(self.base_url + '/get/signal/<path:label>',
view_func = self.get_signal)
self.add_url_rule(self.base_url + '/set/signal/<path:label>/<value>',
view_func = self.set_signal)
self.add_url_rule(self.base_url + '/list/signals',
view_func = self.list_signals)
# sources
self.add_url_rule(self.base_url + '/add/source/<path:label>/<module_name>/<class_name>',
view_func = self.add_source)
self.add_url_rule(self.base_url + '/remove/source/<path:label>',
view_func = self.remove_source)
self.add_url_rule(self.base_url + '/get/source/<path:label>',
view_func = self.get_source)
self.add_url_rule(self.base_url + '/set/source/<path:label>',
view_func = self.set_source)
self.add_url_rule(self.base_url + '/html/source/<path:label>',
view_func = self.html_source)
# filters
self.add_url_rule(self.base_url + '/add/filter/<path:label>/<module_name>/<class_name>',
view_func = self.add_filter)
self.add_url_rule(self.base_url + '/remove/filter/<path:label>',
view_func = self.remove_filter)
self.add_url_rule(self.base_url + '/get/filter/<path:label>',
view_func = self.get_filter)
self.add_url_rule(self.base_url + '/set/filter/<path:label>',
view_func = self.set_filter)
self.add_url_rule(self.base_url + '/html/filter/<path:label>',
view_func = self.html_filter)
# sinks
self.add_url_rule(self.base_url + '/add/sink/<path:label>/<module_name>/<class_name>',
view_func = self.add_sink)
self.add_url_rule(self.base_url + '/remove/sink/<path:label>',
view_func = self.remove_sink)
self.add_url_rule(self.base_url + '/get/sink/<path:label>',
view_func = self.get_sink)
self.add_url_rule(self.base_url + '/set/sink/<path:label>',
view_func = self.set_sink)
self.add_url_rule(self.base_url + '/html/sink/<path:label>',
view_func = self.html_sink)
# timers
self.add_url_rule(self.base_url + '/add/timer/<path:label>/<module_name>/<class_name>',
view_func = self.add_timer)
self.add_url_rule(self.base_url + '/remove/timer/<path:label>',
view_func = self.remove_timer)
self.add_url_rule(self.base_url + '/get/timer/<path:label>',
view_func = self.get_timer)
self.add_url_rule(self.base_url + '/set/timer/<path:label>',
view_func = self.set_timer)
self.add_url_rule(self.base_url + '/html/timer/<path:label>',
view_func = self.html_timer)
def set_controller(self, **kwargs):
# Create new controller?
if 'module' in kwargs or 'pyctrl_class' in kwargs:
module = kwargs.pop('module', 'pyctrl')
pyctrl_class = kwargs.pop('pyctrl_class', 'Controller')
ckwargs = kwargs.pop('kwargs', {})
if len(kwargs) > 0:
raise Exception("webserver.reset():: Unknown parameter(s) '{}'".format(', '.join(str(k) for k in kwargs.keys())))
try:
if True:
warnings.warn("> Installing new instance of '{}.{}({})' as controller".format(module, pyctrl_class, ckwargs))
obj_class = getattr(importlib.import_module(module),
pyctrl_class)
controller = obj_class(**ckwargs)
# print('obj_class = {}'.format(obj_class))
# print('_controller = {}'.format(_controller))
# Make sure it is an instance of pyctrl.Controller
if not isinstance(controller, pyctrl.Controller):
raise Exception("Object '{}.{}' is not and instance of pyctrl.Controller".format(module, pyctrl_class))
self.controller = controller
except Exception as e:
raise Exception("Error resetting controller: {}".format(e))
elif 'controller' in kwargs:
controller = kwargs.pop('controller')
# Make sure it is an instance of pyctrl.Controller
if not isinstance(controller, pyctrl.Controller):
raise Exception("Object '{}.{}' is not and instance of pyctrl.Controller".format(module, pyctrl_class))
self.controller = controller
# auxiliary
def get_keys(self, method, type_name,
label, **kwargs):
# get keys
keys = kwargs.get('keys', '')
if keys and not isinstance(keys, (list,tuple)):
keys = [keys]
print('keys = {}'.format(keys))
# get container
(container,label) = self.controller.resolve_label(label)
if keys:
# return attributes
if len(keys) > 1:
return method(label, *keys)
else:
return {keys[0]: method(label, *keys)}
else:
# return container
return {label:
getattr(container, type_name)[label]['block']}
# handlers
def index(self):
sinks = [ {'label': k, 'is_logger': isinstance(v['block'], Logger)}
for (k,v) in self.controller.sinks.items() ]
return render_template('index.html',
baseurl = self.base_url,
class_name = self.controller.info('class'),
signals = sorted(self.controller.list_signals()),
sources = self.controller.list_sources(),
filters = self.controller.list_filters(),
sinks = sinks,
timers = self.controller.list_timers(),
is_running = self.controller.get_signal('is_running'))
def info(self):
return self.controller.html()
def scope(self, label, *args, **kwargs):
return render_template('scope.html',
baseurl = self.base_url,
logger = label)
def download(self):
response = make_response(jsonify(self.controller))
response.headers["Content-Disposition"] \
= "attachment; filename=controller.json"
return response
def upload(self, **kwargs):
# post?
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash("Form has no field 'part'")
else:
# has file
file = request.files['file']
# empty filename?
if not file or file.filename == '':
flash('No file selected')
else:
# there is a file
try:
controller = decoder.decode(file.read().decode('utf-8'))
# print('controller = {}'.format(controller))
self.set_controller(controller = controller)
flash('New controller succesfully loaded.')
except Exception as e:
message = io.StringIO()
traceback.print_exc(file=message)
flash('Could not load controller.')
flash(message.getvalue())
return redirect(self.base_url + '/')
@json_response
@decode_kwargs
def reset(self, **kwargs):
return self.controller.reset(**kwargs)
@decode_kwargs
def reset_controller(self, **kwargs):
# set new controller
self.set_controller(**kwargs)
# redirect to base
return redirect(self.base_url + '/')
@json_response
def start(self):
return self.controller.start()
@json_response
def stop(self):
return self.controller.stop()
@json_response
def add_signal(self, *args, **kwargs):
return self.controller.add_signal(*args, **kwargs)
@json_response
def remove_signal(self, *args, **kwargs):
return self.controller.remove_signal(*args, **kwargs)
@json_response
def get_signal(self, label, *args, **kwargs):
return {label: self.controller.get_signal(label, *args, **kwargs)}
@json_response
@decode_value
def set_signal(self, *args, **kwargs):
return self.controller.set_signal(*args, **kwargs)
@json_response
def list_signals(self):
return self.controller.list_signals()
# sources
@json_response
@decode_kwargs
def add_source(self, label, module_name, class_name, **kwargs):
return self.controller.add_source(label, (module_name, class_name),
**kwargs)
@json_response
def remove_source(self, *args, **kwargs):
return self.controller.remove_source(*args, **kwargs)
@json_response
@decode_kwargs
def get_source(self, label, *args, **kwargs):
return self.get_keys(self.controller.get_source, 'sources',
label, *args, **kwargs)
@json_response
@decode_kwargs
def set_source(self, *args, **kwargs):
return self.controller.set_source(*args, **kwargs)
@decode_kwargs
def html_source(self, label, *args, **kwargs):
# get container
(container,label) = self.controller.resolve_label(label)
return self.controller.sources[label]['block'].html();
# filters
@json_response
@decode_kwargs
def add_filter(self, label, module_name, class_name, **kwargs):
return self.controller.add_filter(label, (module_name, class_name),
**kwargs)
@json_response
def remove_filter(self, *args, **kwargs):
return self.controller.remove_filter(*args, **kwargs)
@json_response
@decode_kwargs
def get_filter(self, label, *args, **kwargs):
return self.get_keys(self.controller.get_filter, 'filters',
label, *args, **kwargs)
@json_response
@decode_kwargs
def set_filter(self, *args, **kwargs):
return self.controller.set_filter(*args, **kwargs)
@decode_kwargs
def html_filter(self, label, *args, **kwargs):
# get container
(container,label) = self.controller.resolve_label(label)
return self.controller.filters[label]['block'].html();
# sinks
@json_response
@decode_kwargs
def add_sink(self, label, module_name, class_name, **kwargs):
return self.controller.add_sink(label, (module_name, class_name),
**kwargs)
@json_response
def remove_sink(self, *args, **kwargs):
return self.controller.remove_sink(*args, **kwargs)
@json_response
@decode_kwargs
def get_sink(self, label, *args, **kwargs):
return self.get_keys(self.controller.get_sink, 'sinks',
label, *args, **kwargs)
@json_response
@decode_kwargs
def set_sink(self, *args, **kwargs):
return self.controller.set_sink(*args, **kwargs)
@decode_kwargs
def html_sink(self, label, *args, **kwargs):
# get container
(container,label) = self.controller.resolve_label(label)
return self.controller.sinks[label]['block'].html();
# timers
@json_response
@decode_kwargs
def add_timer(self, label, module_name, class_name, **kwargs):
return self.controller.add_timer(label, (module_name, class_name),
**kwargs)
@json_response
def remove_timer(self, *args, **kwargs):
return self.controller.remove_timer(*args, **kwargs)
@json_response
@decode_kwargs
def get_timer(self, label, *args, **kwargs):
return self.get_keys(self.controller.get_timer, 'timers',
label, *args, **kwargs)
@json_response
@decode_kwargs
def set_timer(self, *args, **kwargs):
return self.controller.set_timer(*args, **kwargs)
@decode_kwargs
def html_timer(self, label, *args, **kwargs):
# get container
(container,label) = self.controller.resolve_label(label)
return self.controller.timers[label]['block'].html();
if __name__ == "__main__":
try:
import os
os.environ['RCPY_NO_HANDLERS'] = 't'
from pyctrl.rc import Controller
debug = False
RCPY = True
except:
from pyctrl.timer import Controller
debug = True
RCPY = False
try:
app = Server(__name__)
app.config['SECRET_KEY'] = 'secret!'
# initialize controller
app.set_controller(controller = Controller(period = .01))
# run app
app.run(host='0.0.0.0',
debug = debug)
except:
pass
finally:
sys.exit(0)
# --- posts/api/views.py (loafbaker/django_blog) ---
from django.db.models import Q
from rest_framework.generics import ListAPIView, CreateAPIView, RetrieveAPIView, RetrieveUpdateAPIView, DestroyAPIView
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly
from posts.models import Post
from .pagination import PostPageNumberPagination
from .permissions import IsOwnerOrReadOnly
from .serializers import PostCreateUpdateSerializer, PostDetailSerializer, PostListSerializer
class PostListAPIView(ListAPIView):
queryset = Post.objects.all()
serializer_class = PostListSerializer
filter_backends = [SearchFilter, OrderingFilter]
search_fields = ['title', 'content', 'user__first_name', 'user__last_name']
pagination_class = PostPageNumberPagination
def get_queryset(self):
queryset_list = super(PostListAPIView, self).get_queryset()
query = self.request.GET.get('q')
if query:
queryset_list = queryset_list.filter(
Q(title__icontains=query) |
Q(content__icontains=query) |
Q(user__first_name=query) |
Q(user__last_name=query)
)
return queryset_list
class PostCreateAPIView(CreateAPIView):
permission_classes = [IsAuthenticated]
queryset = Post.objects.all()
serializer_class = PostCreateUpdateSerializer
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class PostDetailAPIView(RetrieveAPIView):
queryset = Post.objects.all()
serializer_class = PostDetailSerializer
lookup_field = 'slug'
class PostUpdateAPIView(RetrieveUpdateAPIView):
permission_classes = [IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]
queryset = Post.objects.all()
serializer_class = PostCreateUpdateSerializer
lookup_field = 'slug'
class PostDeleteAPIView(DestroyAPIView):
permission_classes = [IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]
queryset = Post.objects.all()
serializer_class = PostListSerializer
lookup_field = 'slug' | 366 | 932 | 115 |
# --- proxy-alpha.py (ARTRoyale/ZapRoyale, MIT) ---
# by ARTRoyale (A. Lebedev) for ZapRoyale
import socket
import threading
import struct
import os
import uuid
import random
# start debugging
global debugmode
debugmode = True
def debug(debmessage):
if debmessage:
if debugmode:
print('[DEBUG]',debmessage)
else:
pass
else:
pass
def randomBytes(n):
return bytes(random.getrandbits(8) for i in range(n))
def mockrcv(mock):
rdata = mock.recv(10086)
if not rdata:
return "nulldata"
return rdata
def serverlisten(mock, client):
ndata=mockrcv(mock)
if(ndata=="nulldata"):
print('[WARNING] Net proksi!')
return False
else:
lmessage_id = int(str(struct.unpack('>H', ndata[:2]))[1:-2])
if (lmessage_id >= 30000 or lmessage_id < 10000):
lmessage_id = 'Neizvestniy messadzh'
        elif len(str(lmessage_id)) != 5:
lmessage_id = 'Neizvestniy messadzh'
print('[OK] Servak => Proksi', lmessage_id)
response = ndata
try:
client.send(response)
except ConnectionAbortedError:
client.close()
debug('closed')
global gl_server_address
gl_server_address = ('***.***.*.**', 9339)
class ThreadedServer(object):
def __init__(self, host, port):
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.host, self.port))
print('[OK] Zapusk Proksi')
def listen(self):
self.sock.listen(5)
while True:
client, address = self.sock.accept()
print('[INFO] Klient => Proksi', address, 'podklucheno')
            client.settimeout(60) # timeout
mock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("[INFO] Proksi =><= Server pudkluchen k",gl_server_address[0],'on port',gl_server_address[1])
mock.connect(gl_server_address)
print('[INFO] Proksi =><= Server podkluchen')
threading.Thread(target = self.listenToClient,args = (client,address,mock)).start()
def listenToClient(self, client, address, mock):
while True:
try:
data = client.recv(4098)
except:
debug('closed')
mock.close()
try:
message_id = int(str(struct.unpack('>H', data[:2]))[1:-2])
except:
message_id = 'Neizvestniy messadzh'
try:
if (message_id >= 30000 or message_id < 10000):
message_id = 'Neizvestniy messadzh'
except:
message_id = 'Neizvestniy messadzh'
try:
				if len(str(message_id)) != 5:
message_id = 'Neizvestniy messadzh'
except:
message_id = 'Neizvestniy messadzh'
print('[OK] Klient => Proksi', message_id)
fmessage = data
try:
mock.sendall(fmessage)
except:
debug('done closing?')
break
print('[OK] Proksi => Server', message_id)
while 1:
debug('Slushayu servak')
r = serverlisten(mock, client);
if r == False:
debug('Net infy ot servaka')
break
else:
debug('Danniye polucheny ot servaka')
break
if __name__ == "__main__":
port_num = 9339
print('[INFO] Proksi podkluchaetsa k portu', port_num)
ThreadedServer('0.0.0.0',port_num).listen()
| 3,126 | 8 | 192 |
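
The proxy above frames traffic by reading the first two bytes of every packet as a big-endian unsigned short and accepting only five-digit message ids in [10000, 30000). A self-contained sketch of that header check (sample bytes are assumptions):

# Minimal sketch of the 2-byte big-endian message-id check used by the proxy.
import struct

def parse_message_id(data: bytes):
    """Return the message id from a 2-byte big-endian header, or None if invalid."""
    if len(data) < 2:
        return None
    (message_id,) = struct.unpack('>H', data[:2])
    return message_id if 10000 <= message_id < 30000 else None

assert parse_message_id(b'\x27\x10' + b'payload') == 10000  # 0x2710 == 10000
assert parse_message_id(b'\x00\x2a') is None                # 42 is out of range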
a5087d2093117c0c8a944443b7263a2f96effcb6 | 213 | py | Python | api/api_scheme.py | raywu60kg/tensorlfow-project-demo | acd1085788da289ec7ed21ec0d46c9599188e32c | [
"MIT"
] | null | null | null | api/api_scheme.py | raywu60kg/tensorlfow-project-demo | acd1085788da289ec7ed21ec0d46c9599188e32c | [
"MIT"
] | null | null | null | api/api_scheme.py | raywu60kg/tensorlfow-project-demo | acd1085788da289ec7ed21ec0d46c9599188e32c | [
"MIT"
] | null | null | null | from pydantic import BaseModel
# class MetricsOutput(BaseModel):
# name: str
# metrics: dict
| 14.2 | 36 | 0.723005 | from pydantic import BaseModel
class HealthCheckOutput(BaseModel):
health: bool
# class MetricsOutput(BaseModel):
# name: str
# metrics: dict
class RetrainModelOutput(BaseModel):
train: bool
| 0 | 62 | 46 |
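
The models above are plain pydantic response schemas; a minimal usage sketch, assuming FastAPI (the routes and handlers are assumptions — the original module only defines the models):

# Hypothetical FastAPI endpoints using the schemas above.
from fastapi import FastAPI
from api.api_scheme import HealthCheckOutput, RetrainModelOutput

app = FastAPI()

@app.get("/health", response_model=HealthCheckOutput)
def health_check():
    # the return value is validated and serialized against the schema
    return HealthCheckOutput(health=True)

@app.post("/retrain", response_model=RetrainModelOutput)
def retrain_model():
    return RetrainModelOutput(train=True)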
de32d45449405b3ec00b4326a7b6348906ee8392 | 742 | py | Python | api/blueprints/users/views/roles.py | mohamed040406/API | 40ceb2b35271938d90e4309a6cdcf63ba0c17f0b | [
"MIT"
] | 1 | 2021-05-01T02:25:27.000Z | 2021-05-01T02:25:27.000Z | api/blueprints/users/views/roles.py | mohamed040406/API | 40ceb2b35271938d90e4309a6cdcf63ba0c17f0b | [
"MIT"
] | null | null | null | api/blueprints/users/views/roles.py | mohamed040406/API | 40ceb2b35271938d90e4309a6cdcf63ba0c17f0b | [
"MIT"
] | null | null | null | from quart import request, jsonify
import time
from api.models import User
from .. import bp
import utils
request: utils.Request
@bp.route("/<int:user_id>/roles", methods=["GET"])
@utils.auth_required
async def fetch_user_roles(user_id: int):
"""Fetch the specific users roles"""
query = """
SELECT json_agg(json_build_object(
'name', r.name,
'base', r.base,
'id', r.id::TEXT,
'color', r.color,
'position', r.position,
'permissions', r.permissions::TEXT
))
FROM roles r WHERE r.id IN (
		SELECT ur.role_id FROM userroles ur WHERE ur.user_id = $1
)
"""
record = await User.pool.fetchval(query, user_id)
return jsonify(roles=record)
| 24.733333 | 66 | 0.617251 | from quart import request, jsonify
import time
from api.models import User
from .. import bp
import utils
request: utils.Request
@bp.route("/<int:user_id>/roles", methods=["GET"])
@utils.auth_required
async def fetch_user_roles(user_id: int):
"""Fetch the specific users roles"""
query = """
SELECT json_agg(json_build_object(
'name', r.name,
'base', r.base,
'id', r.id::TEXT,
'color', r.color,
'position', r.position,
'permissions', r.permissions::TEXT
))
FROM roles r WHERE r.id IN (
		SELECT ur.role_id FROM userroles ur WHERE ur.user_id = $1
)
"""
record = await User.pool.fetchval(query, user_id)
return jsonify(roles=record)
| 0 | 0 | 0 |
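
The query above shapes the payload inside PostgreSQL: json_agg(json_build_object(...)) returns a single JSON array with one object per role, and id/permissions are cast to TEXT, presumably to keep 64-bit values exact in JSON. An illustrative result (all values are made up):

# Illustrative JSON returned by GET /<user_id>/roles (values are assumptions):
example = {
    "roles": [
        {
            "name": "Admin",
            "base": False,
            "id": "1001",          # cast to TEXT by the query
            "color": 16711680,
            "position": 1,
            "permissions": "8",    # cast to TEXT by the query
        }
    ]
}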
d262a3348286d2c2acf7e83331728949dbe00b99 | 2,328 | py | Python | mkgta.py | shaun95/Tacotron2-PyTorch | b1761fd7660e56adf39f3c8d02852fbaec1da2c5 | [
"MIT"
] | 1 | 2022-03-10T20:02:58.000Z | 2022-03-10T20:02:58.000Z | mkgta.py | shaun95/Tacotron2-PyTorch | b1761fd7660e56adf39f3c8d02852fbaec1da2c5 | [
"MIT"
] | null | null | null | mkgta.py | shaun95/Tacotron2-PyTorch | b1761fd7660e56adf39f3c8d02852fbaec1da2c5 | [
"MIT"
] | null | null | null | import os
import torch
import argparse
import numpy as np
import matplotlib.pylab as plt
from text import text_to_sequence
from model.model import Tacotron2
from hparams import hparams as hps
from utils.util import mode, to_var, to_arr
from utils.audio import load_wav, save_wav, melspectrogram
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--ckpt_pth', type = str, default = '',
required = True, help = 'path to load checkpoints')
parser.add_argument('-n', '--npy_pth', type = str, default = 'dump',
help = 'path to save mels')
args = parser.parse_args()
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
model = load_model(args.ckpt_pth)
flist = files_to_list()
for x in flist:
ret = infer(x[0], x[1], model)
name = x[0].split('/')[-1].split('.wav')[0]
if args.npy_pth != '':
save_mel(ret, args.npy_pth, name)
| 31.04 | 75 | 0.627577 | import os
import torch
import argparse
import numpy as np
import matplotlib.pylab as plt
from text import text_to_sequence
from model.model import Tacotron2
from hparams import hparams as hps
from utils.util import mode, to_var, to_arr
from utils.audio import load_wav, save_wav, melspectrogram
def files_to_list(fdir = 'data'):
f_list = []
with open(os.path.join(fdir, 'metadata.csv'), encoding = 'utf-8') as f:
for line in f:
parts = line.strip().split('|')
wav_path = os.path.join(fdir, 'wavs', '%s.wav' % parts[0])
f_list.append([wav_path, parts[1]])
return f_list
def load_model(ckpt_pth):
ckpt_dict = torch.load(ckpt_pth)
model = Tacotron2()
model.load_state_dict(ckpt_dict['model'])
model = mode(model, True).eval()
	# keep the autoregressive parts in train mode so dropout stays active for GTA extraction
	model.decoder.train()
	model.postnet.train()
return model
def infer(wav_path, text, model):
sequence = text_to_sequence(text, hps.text_cleaners)
sequence = to_var(torch.IntTensor(sequence)[None, :]).long()
mel = melspectrogram(load_wav(wav_path))
mel_in = to_var(torch.Tensor([mel]))
r = mel_in.shape[2]%hps.n_frames_per_step
if r != 0:
mel_in = mel_in[:, :, :-r]
sequence = torch.cat([sequence, sequence], 0)
mel_in = torch.cat([mel_in, mel_in], 0)
_, mel_outputs_postnet, _, _ = model.teacher_infer(sequence, mel_in)
ret = mel
if r != 0:
ret[:, :-r] = to_arr(mel_outputs_postnet[0])
else:
ret = to_arr(mel_outputs_postnet[0])
return ret
def save_mel(res, pth, name):
out = os.path.join(pth, name)
np.save(out, res)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--ckpt_pth', type = str, default = '',
required = True, help = 'path to load checkpoints')
parser.add_argument('-n', '--npy_pth', type = str, default = 'dump',
help = 'path to save mels')
args = parser.parse_args()
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
model = load_model(args.ckpt_pth)
flist = files_to_list()
for x in flist:
ret = infer(x[0], x[1], model)
name = x[0].split('/')[-1].split('.wav')[0]
if args.npy_pth != '':
save_mel(ret, args.npy_pth, name)
| 1,224 | 0 | 92 |
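
mkgta.py extracts ground-truth-aligned (GTA) mels by teacher-forcing the trained Tacotron 2 on each (wav, text) pair — the usual way to build a vocoder training set. The CLI form is python mkgta.py -c <ckpt> -n dump; a minimal programmatic sketch of the same flow (the checkpoint path is an assumption):

# Hypothetical programmatic use of the helpers above.
model = load_model('ckpt/ckpt_200000.pt')      # assumed checkpoint path
for wav_path, text in files_to_list('data'):
    mel = infer(wav_path, text, model)         # teacher-forced mel, shape (n_mels, T)
    name = wav_path.split('/')[-1].split('.wav')[0]
    save_mel(mel, 'dump', name)                # writes dump/<name>.npy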
78a847a9735c69df68fbcc09cc32244397b197f6 | 5,580 | py | Python | doors-detector/doors_detector/dataset/dataset_doors_final/datasets_creator_doors_final.py | micheleantonazzi/master-thesis-robust-door-detector | 685c2d13a6617978c8fc0324e92aab82f5a04b85 | [
"Apache-2.0"
] | null | null | null | doors-detector/doors_detector/dataset/dataset_doors_final/datasets_creator_doors_final.py | micheleantonazzi/master-thesis-robust-door-detector | 685c2d13a6617978c8fc0324e92aab82f5a04b85 | [
"Apache-2.0"
] | null | null | null | doors-detector/doors_detector/dataset/dataset_doors_final/datasets_creator_doors_final.py | micheleantonazzi/master-thesis-robust-door-detector | 685c2d13a6617978c8fc0324e92aab82f5a04b85 | [
"Apache-2.0"
] | null | null | null | from typing import Union, Tuple
import pandas as pd
from sklearn.utils import shuffle
from doors_detector.dataset.torch_dataset import TRAIN_SET, TEST_SET, SET
from generic_dataset.dataset_manager import DatasetManager
from doors_detector.dataset.dataset_doors_final.door_sample import DoorSample, DOOR_LABELS
from sklearn.model_selection import train_test_split
from doors_detector.dataset.dataset_doors_final.final_doors_dataset import DatasetDoorsFinal
| 54.174757 | 174 | 0.682437 | from typing import Union, Tuple
import pandas as pd
from sklearn.utils import shuffle
from doors_detector.dataset.torch_dataset import TRAIN_SET, TEST_SET, SET
from generic_dataset.dataset_manager import DatasetManager
from doors_detector.dataset.dataset_doors_final.door_sample import DoorSample, DOOR_LABELS
from sklearn.model_selection import train_test_split
from doors_detector.dataset.dataset_doors_final.final_doors_dataset import DatasetDoorsFinal
class DatasetsCreatorDoorsFinal:
def __init__(self, dataset_path: str):
self._dataset_path = dataset_path
self._dataset_manager = DatasetManager(dataset_path=dataset_path, sample_class=DoorSample)
self._dataframe = self._dataset_manager.get_dataframe()
self._experiment = 1
self._folder_name = None
self._use_negatives = False
def get_labels(self):
return DOOR_LABELS
def set_experiment_number(self, experiment: int, folder_name: str) -> 'DatasetsCreatorDoorsFinal':
"""
This method is used to set up the experiment to run.
		1) The first experiment involves training the model using k-1 folders and
		testing it with all the examples in the remaining folder.
		2) The second experiment involves fine-tuning the previously trained model using some examples of the test data used in experiment 1.
		This new training data belongs to a new environment, never seen in the first training phase. The remaining samples of the k-th folder are used as a test set.
		:param experiment: the number of the experiment to perform. Its value must be 1 or 2
		:param folder_name: the name of the folder to use as a test set in experiment 1 or to split into training and test sets in experiment 2.
:return: the instance of DatasetsCreatorDoorsFinal
"""
assert experiment == 1 or experiment == 2
self._experiment = experiment
self._folder_name = folder_name
return self
def use_negatives(self, use_negatives: bool) -> 'DatasetsCreatorDoorsFinal':
"""
Sets the presence of the negative samples in the test set.
		:param use_negatives: True to include the negative samples (samples with no doors) in the test set, False to use only positive ones
		:return: the instance of DatasetsCreatorDoorsFinal
"""
self._use_negatives = use_negatives
return self
def create_datasets(self, train_size: float = 0.1, random_state: int = 42) -> Tuple[DatasetDoorsFinal, DatasetDoorsFinal]:
"""
This method returns the training and test sets.
		:param train_size: the size of the training set in experiment 2. For the first experiment this parameter is ignored: all samples of the k-1 training folders are used.
"""
if isinstance(train_size, float):
assert 0.0 < train_size < 1.0
shuffled_dataframe = shuffle(self._dataframe, random_state=random_state)
if self._experiment == 1:
train_dataframe = shuffled_dataframe[(shuffled_dataframe.folder_name != self._folder_name) & (shuffled_dataframe.label == 1)]
test_dataframe = shuffled_dataframe[shuffled_dataframe.folder_name == self._folder_name]
if not self._use_negatives:
test_dataframe = test_dataframe[test_dataframe.label == 1]
elif self._experiment == 2:
shuffled_dataframe = shuffled_dataframe[shuffled_dataframe.folder_name == self._folder_name]
positive_dataframe = shuffled_dataframe[shuffled_dataframe.label == 1]
negative_dataframe = shuffled_dataframe[shuffled_dataframe.label == 0]
train, test = train_test_split(positive_dataframe.index.tolist(), test_size=0.25, random_state=random_state)
if train_size < 0.75:
train, _ = train_test_split(train, train_size=train_size * (4 / 3), random_state=random_state)
train_dataframe = shuffled_dataframe.loc[train]
test_dataframe = shuffle(pd.concat([shuffled_dataframe.loc[test], negative_dataframe]), random_state=random_state)
if not self._use_negatives:
test_dataframe = test_dataframe[test_dataframe.label == 1]
def print_information(dataframe):
print(f' - total samples = {len(dataframe.index)}\n'
f' - Folders considered: {sorted(dataframe.folder_name.unique())}\n'
f' - Labels considered: {sorted(dataframe.label.unique())}\n'
f' - Total samples in folder: ')
for folder in sorted(dataframe.folder_name.unique()):
print(f' - {folder}: {len(dataframe[dataframe.folder_name == folder])} samples')
if DoorSample.GET_LABEL_SET():
print(f' Samples per label:')
for label in sorted(list(DoorSample.GET_LABEL_SET())):
print(f' - {label}: {len(dataframe[(dataframe.folder_name == folder) & (dataframe.label == label)])}')
print()
for m, d in zip(['Datasets summary:', 'Train set summary:', 'Test set summary:'], [self._dataframe, train_dataframe, test_dataframe]):
print(m)
print_information(d)
return (DatasetDoorsFinal(self._dataset_path, train_dataframe, TRAIN_SET, std_size=256, max_size=800, scales=[256 + i * 32 for i in range(11)]),
DatasetDoorsFinal(self._dataset_path, test_dataframe, TEST_SET, std_size=256, max_size=800, scales=[256 + i * 32 for i in range(11)])) | 1,162 | 3,936 | 23 |
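
The setters above return self, so calls chain naturally; a minimal usage sketch (dataset path and folder name are assumptions):

# Hypothetical usage of DatasetsCreatorDoorsFinal.
creator = DatasetsCreatorDoorsFinal('/data/final_doors_dataset')
train_set, test_set = (
    creator.set_experiment_number(experiment=2, folder_name='house_1')
           .use_negatives(False)
           .create_datasets(train_size=0.25, random_state=42)
)
# experiment 2: 25% of 'house_1' positives for fine-tuning, the rest for testing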
005dfd3bd6b99b749c3643626e7c275bbe2acb28 | 1,251 | py | Python | com/Leetcode/981.TimeBasedKey-ValueStore.py | samkitsheth95/InterviewPrep | 6be68c19bcaab4e64a8f646cc64f651bade8ba86 | [
"MIT"
] | null | null | null | com/Leetcode/981.TimeBasedKey-ValueStore.py | samkitsheth95/InterviewPrep | 6be68c19bcaab4e64a8f646cc64f651bade8ba86 | [
"MIT"
] | null | null | null | com/Leetcode/981.TimeBasedKey-ValueStore.py | samkitsheth95/InterviewPrep | 6be68c19bcaab4e64a8f646cc64f651bade8ba86 | [
"MIT"
] | null | null | null | from collections import defaultdict
from bisect import bisect
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp)
| 25.02 | 64 | 0.513189 | from collections import defaultdict
from bisect import bisect
class TimeMap:
def binarySearch(self, a, key):
		if not a or key < a[0][1]:
return ''
elif key >= a[-1][1]:
return a[-1][0]
low = 0
high = len(a) - 1
while low <= high:
mid = low + (high - low) // 2
if a[mid][1] == key:
return a[mid][0]
elif a[mid][1] > key:
high = mid - 1
else:
low = mid + 1
return a[high][0]
def __init__(self):
"""
Initialize your data structure here.
"""
self.d = defaultdict(list)
def set(self, key: str, value: str, timestamp: int) -> None:
self.d[key].append((value, timestamp))
def get(self, key: str, timestamp: int) -> str:
return self.binarySearch(self.d[key], timestamp)
def getBisect(self, key, timestamp):
		# Alternative lookup via bisect: entries are stored as (value, timestamp),
		# so bisect over the timestamp component only.
		A = self.d.get(key)
		if not A:
			return ""
		i = bisect([t for _, t in A], timestamp)
		return A[i - 1][0] if i else ""
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp)
| 786 | 230 | 23 |
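
A usage sketch matching the commented driver above (it mirrors the LeetCode 981 example):

tm = TimeMap()
tm.set("foo", "bar", 1)            # stored internally as ("bar", 1)
assert tm.get("foo", 1) == "bar"   # exact timestamp match
assert tm.get("foo", 3) == "bar"   # latest value at or before timestamp 3
tm.set("foo", "bar2", 4)
assert tm.get("foo", 4) == "bar2"
assert tm.get("foo", 5) == "bar2"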
5017ac97f2b5056a11800f28fde484ec4a35c1b3 | 8,797 | py | Python | sstvis.py | mdjong1/sstvis | 927590b1295491a062a77634008a9146e783c617 | [
"MIT"
] | null | null | null | sstvis.py | mdjong1/sstvis | 927590b1295491a062a77634008a9146e783c617 | [
"MIT"
] | null | null | null | sstvis.py | mdjong1/sstvis | 927590b1295491a062a77634008a9146e783c617 | [
"MIT"
] | null | null | null | import fileinput
import sys
import math
import time
import os
import click
# prevent pygame from printing their welcome message
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
# Define some basic colors for easy use
white = (255, 255, 255)
red = (255, 0, 0)
black = (0, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
# Screen resolution to use
window_dimensions = (1200, 800)
# Higher frequency is less updates, lower frequency is more updates (it's a x % frequency == 0)
UPDATE_FREQUENCY = 1000
# Only updates every nth triangle, increases clarity in high density datasets
# Can also put this to 1 and make the scaling factor larger
THINNING_FACTOR = 1
pygame.init()
screen = pygame.display.set_mode(window_dimensions)
screen.fill(white)
font = pygame.font.SysFont("Arial", 12)
# TODO: Split label and value for each statistics field
time_taken = font.render("time:", True, white, blue)
tt_rect = time_taken.get_rect(bottomright=(80, window_dimensions[1] - 65))
screen.blit(time_taken, tt_rect)
time_taken_val = font.render(" ", True, white, blue)
tt_rect2 = time_taken_val.get_rect(bottomleft=(80, window_dimensions[1] - 65))
screen.blit(time_taken_val, tt_rect2)
points_per_second = font.render("avg #pts/s:", True, white, blue)
pps_rect = points_per_second.get_rect(bottomright=(80, window_dimensions[1] - 45))
screen.blit(points_per_second, pps_rect)
points_per_second_val = font.render(" ", True, white, blue)
pps_rect2 = points_per_second_val.get_rect(bottomleft=(80, window_dimensions[1] - 45))
screen.blit(points_per_second_val, pps_rect2)
# points_last_minute = font.render(" # pts last minute:", True, white, blue)
# plm_rect = points_last_minute.get_rect(bottomright=(80, window_dimensions[1] - 95))
# screen.blit(points_last_minute, plm_rect)
# points_last_minute_val = font.render(" ", True, white, blue)
# plm_rect2 = points_last_minute_val.get_rect(bottomleft=(80, window_dimensions[1] - 95))
# screen.blit(points_last_minute_val, plm_rect2)
total_points = font.render("# pts:", True, white, blue)
tp_rect = total_points.get_rect(bottomright=(80, window_dimensions[1] - 25))
screen.blit(total_points, tp_rect)
total_points_val = font.render(" ", True, white, blue)
tp_rect2 = total_points_val.get_rect(bottomleft=(80, window_dimensions[1] - 25))
screen.blit(total_points_val, tp_rect2)
total_triangles = font.render("# triangles:", True, white, blue)
ttr_rect = total_triangles.get_rect(bottomright=(80, window_dimensions[1] - 5))
screen.blit(total_triangles, ttr_rect)
total_triangles_val = font.render(" ", True, white, blue)
ttr_rect2 = total_triangles_val.get_rect(bottomleft=(80, window_dimensions[1] - 5))
screen.blit(total_triangles_val, ttr_rect2)
pygame.display.set_caption('sstvis')
pygame.display.flip()
@click.command()
@click.option('--thinning', default=THINNING_FACTOR, help='thinning factor (1 = no thinning)')
@click.option('--frequency', default=UPDATE_FREQUENCY, help='Higher frequency is less updates, lower frequency is more updates')
if __name__ == "__main__":
main() | 35.615385 | 128 | 0.630329 | import fileinput
import sys
import math
import time
import os
import click
# prevent pygame from printing their welcome message
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
# Define some basic colors for easy use
white = (255, 255, 255)
red = (255, 0, 0)
black = (0, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
# Screen resolution to use
window_dimensions = (1200, 800)
# Higher frequency is less updates, lower frequency is more updates (it's a x % frequency == 0)
UPDATE_FREQUENCY = 1000
# Only updates every nth triangle, increases clarity in high density datasets
# Can also put this to 1 and make the scaling factor larger
THINNING_FACTOR = 1
pygame.init()
screen = pygame.display.set_mode(window_dimensions)
screen.fill(white)
font = pygame.font.SysFont("Arial", 12)
# TODO: Split label and value for each statistics field
time_taken = font.render("time:", True, white, blue)
tt_rect = time_taken.get_rect(bottomright=(80, window_dimensions[1] - 65))
screen.blit(time_taken, tt_rect)
time_taken_val = font.render(" ", True, white, blue)
tt_rect2 = time_taken_val.get_rect(bottomleft=(80, window_dimensions[1] - 65))
screen.blit(time_taken_val, tt_rect2)
points_per_second = font.render("avg #pts/s:", True, white, blue)
pps_rect = points_per_second.get_rect(bottomright=(80, window_dimensions[1] - 45))
screen.blit(points_per_second, pps_rect)
points_per_second_val = font.render(" ", True, white, blue)
pps_rect2 = points_per_second_val.get_rect(bottomleft=(80, window_dimensions[1] - 45))
screen.blit(points_per_second_val, pps_rect2)
# points_last_minute = font.render(" # pts last minute:", True, white, blue)
# plm_rect = points_last_minute.get_rect(bottomright=(80, window_dimensions[1] - 95))
# screen.blit(points_last_minute, plm_rect)
# points_last_minute_val = font.render(" ", True, white, blue)
# plm_rect2 = points_last_minute_val.get_rect(bottomleft=(80, window_dimensions[1] - 95))
# screen.blit(points_last_minute_val, plm_rect2)
total_points = font.render("# pts:", True, white, blue)
tp_rect = total_points.get_rect(bottomright=(80, window_dimensions[1] - 25))
screen.blit(total_points, tp_rect)
total_points_val = font.render(" ", True, white, blue)
tp_rect2 = total_points_val.get_rect(bottomleft=(80, window_dimensions[1] - 25))
screen.blit(total_points_val, tp_rect2)
total_triangles = font.render("# triangles:", True, white, blue)
ttr_rect = total_triangles.get_rect(bottomright=(80, window_dimensions[1] - 5))
screen.blit(total_triangles, ttr_rect)
total_triangles_val = font.render(" ", True, white, blue)
ttr_rect2 = total_triangles_val.get_rect(bottomleft=(80, window_dimensions[1] - 5))
screen.blit(total_triangles_val, ttr_rect2)
pygame.display.set_caption('sstvis')
pygame.display.flip()
class Vertex:
def __init__(self, x, y, z):
self.x = float(x)
self.y = float(y)
self.z = float(z)
class Processor:
def __init__(self):
self.bbox = []
self.vertices = {}
self.count = 0
self.vertex_count = 1
self.triangle_count = 0
self.scale = 1
self.start_time = time.time()
self.points_per_time = {}
def transform(self, x, y):
rex = (float(x) - self.bbox[0]) * self.scale + 5
rey = (float(y) - self.bbox[1]) * self.scale
return rex, rey
def increment_count(self):
self.count += 1
def update_statistics(self):
current_epoch = int(time.time())
time_taken_val = font.render(" " + str(round(current_epoch - self.start_time)) + "s ", True, black, white)
screen.blit(time_taken_val, tt_rect2)
points_in_past_minute = 0
for i in range(current_epoch - 60, current_epoch):
if i in self.points_per_time:
points_in_past_minute += self.points_per_time[i]
points_per_second_val = font.render(" " + str(round(points_in_past_minute / 60)) + " ", True, black, white)
screen.blit(points_per_second_val, pps_rect2)
# points_last_minute_val = font.render(" " + str(points_in_past_minute) + " ", True, black, white)
# screen.blit(points_last_minute_val, plm_rect2)
total_points_val = font.render(" " + str(self.vertex_count - 1) + " ", True, black, white)
screen.blit(total_points_val, tp_rect2)
total_triangles_val = font.render(" " + str(self.triangle_count) + " ", True, black, white)
screen.blit(total_triangles_val, ttr_rect2)
# Keep these on top for legibility
screen.blit(time_taken, tt_rect)
screen.blit(points_per_second, pps_rect)
# screen.blit(points_last_minute, plm_rect)
screen.blit(total_points, tp_rect)
screen.blit(total_triangles, ttr_rect)
def process_line(self, line):
pygame.event.get()
split_line = line.rstrip("\n").split(" ")
if split_line[0] == "#":
return
elif split_line[0] == "b":
self.bbox.append(float(split_line[1]))
self.bbox.append(float(split_line[2]))
self.bbox.append(float(split_line[3]))
self.bbox.append(float(split_line[4]))
delta_x = self.bbox[2] - self.bbox[0]
delta_y = self.bbox[3] - self.bbox[1]
largest_delta = delta_y if delta_y > delta_x else delta_x
self.scale = math.floor(window_dimensions[1] / largest_delta)
minx, miny = self.transform(self.bbox[0], self.bbox[1])
maxx, maxy = self.transform(self.bbox[2], self.bbox[3])
pygame.draw.lines(
surface=screen,
color=red,
closed=True,
points=(
(minx, window_dimensions[1] - miny - 5),
(maxx, window_dimensions[1] - miny - 5),
(maxx, window_dimensions[1] - maxy - 5),
(minx, window_dimensions[1] - maxy - 5)
),
width=3
)
pygame.display.update()
elif split_line[0] == "v":
# Add vertex count per unit
current_epoch = int(time.time())
if current_epoch not in self.points_per_time:
self.points_per_time[current_epoch] = 1
else:
self.points_per_time[current_epoch] += 1
# Transform x and y into current scale for visualization, then store that version in the Vertex
x, y = self.transform(split_line[1], split_line[2])
z = split_line[3]
self.vertices[self.vertex_count] = Vertex(x, y, z)
self.vertex_count += 1
elif split_line[0] == "f":
f1 = int(split_line[1])
f2 = int(split_line[2])
f3 = int(split_line[3])
if self.count % THINNING_FACTOR == 0:
pygame.draw.lines(
surface=screen,
color=black,
closed=True,
points=(
(self.vertices[f1].x, window_dimensions[1] - self.vertices[f1].y - 5),
(self.vertices[f2].x, window_dimensions[1] - self.vertices[f2].y - 5),
(self.vertices[f3].x, window_dimensions[1] - self.vertices[f3].y - 5)
),
width=1)
# pygame.draw.circle(screen, black, ((vertices[f1].x, vertices[f1].y)), 1)
self.update_statistics()
if self.count % UPDATE_FREQUENCY == 0:
pygame.display.update()
self.triangle_count += 1
@click.command()
@click.option('--thinning', default=THINNING_FACTOR, help='thinning factor (1 = no thinning)')
@click.option('--frequency', default=UPDATE_FREQUENCY, help='Higher frequency is less updates, lower frequency is more updates')
def main(thinning, frequency):
global THINNING_FACTOR
global UPDATE_FREQUENCY
THINNING_FACTOR = thinning
UPDATE_FREQUENCY = frequency
processor = Processor()
for stdin_line in sys.stdin:
if stdin_line == "":
continue
processor.process_line(stdin_line)
processor.increment_count()
sys.stdout.write(stdin_line)
# Last update of statistics to ensure uniformity
processor.update_statistics()
# Do a final update; because of update frequency a final update in processing loop is not guaranteed
pygame.display.update()
# Keep the pygame window running so you can view the final result
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
running = False
if __name__ == "__main__":
main() | 5,526 | -13 | 228 |
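
sstvis consumes an sst-style triangulation stream on stdin and echoes every line to stdout, so it can sit in the middle of a pipeline. The records it draws are b (bounding box, once), v (vertex, implicit 1-based ids in arrival order) and f (triangle over previously streamed vertex ids). An illustrative stream (coordinates are made up):

# Example input, e.g. piped in with:
#   printf '%s\n' 'b 0 0 10 10' 'v 1 1 0.5' 'v 9 1 0.7' 'v 5 9 0.6' 'f 1 2 3' | python sstvis.py
sample_stream = """b 0 0 10 10
v 1 1 0.5
v 9 1 0.7
v 5 9 0.6
f 1 2 3
"""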
c566c62bfc91343391f87b835b9e079719e2045b | 24,813 | py | Python | neo4j/_async/io/_bolt.py | matilda-me/neo4j-python-driver | 4fb25a266841bf2a861f00d5dcf257bd5ae5c686 | [
"Apache-2.0"
] | null | null | null | neo4j/_async/io/_bolt.py | matilda-me/neo4j-python-driver | 4fb25a266841bf2a861f00d5dcf257bd5ae5c686 | [
"Apache-2.0"
] | null | null | null | neo4j/_async/io/_bolt.py | matilda-me/neo4j-python-driver | 4fb25a266841bf2a861f00d5dcf257bd5ae5c686 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import asyncio
from collections import deque
from logging import getLogger
from time import perf_counter
from ..._async_compat.network import AsyncBoltSocket
from ..._async_compat.util import AsyncUtil
from ..._exceptions import (
BoltError,
BoltHandshakeError,
SocketDeadlineExceeded,
)
from ...addressing import Address
from ...api import (
ServerInfo,
Version,
)
from ...conf import PoolConfig
from ...exceptions import (
AuthError,
DriverError,
IncompleteCommit,
ServiceUnavailable,
SessionExpired,
)
from ...meta import get_user_agent
from ...packstream import (
Packer,
Unpacker,
)
from ._common import (
AsyncInbox,
CommitResponse,
Outbox,
)
# Set up logger
log = getLogger("neo4j")
class AsyncBolt:
""" Server connection for Bolt protocol.
	A :class:`.Bolt` should be constructed following a successful
	.open() call and takes the socket over which the Bolt
	handshake was carried out.
"""
MAGIC_PREAMBLE = b"\x60\x60\xB0\x17"
PROTOCOL_VERSION = None
# flag if connection needs RESET to go back to READY state
is_reset = False
	# flag if the connection is currently in use
in_use = False
# When the connection was last put back into the pool
idle_since = float("-inf")
	# close-state flags for the connection
_closing = False
_closed = False
	# flag if the connection is defunct (broken beyond recovery)
_defunct = False
#: The pool of which this connection is a member
pool = None
	# Store the id of the most recently run query to be able to reduce sent bits by
# using the default (-1) to refer to the most recent query when pulling
# results for it.
most_recent_qid = None
@property
@abc.abstractmethod
def supports_multiple_results(self):
""" Boolean flag to indicate if the connection version supports multiple
queries to be buffered on the server side (True) or if all results need
to be eagerly pulled before sending the next RUN (False).
"""
pass
@property
@abc.abstractmethod
def supports_multiple_databases(self):
""" Boolean flag to indicate if the connection version supports multiple
databases.
"""
pass
@classmethod
def protocol_handlers(cls, protocol_version=None):
""" Return a dictionary of available Bolt protocol handlers,
keyed by version tuple. If an explicit protocol version is
provided, the dictionary will contain either zero or one items,
depending on whether that version is supported. If no protocol
version is provided, all available versions will be returned.
:param protocol_version: tuple identifying a specific protocol
version (e.g. (3, 5)) or None
:return: dictionary of version tuple to handler class for all
relevant and supported protocol versions
:raise TypeError: if protocol version is not passed in a tuple
"""
# Carry out Bolt subclass imports locally to avoid circular dependency issues.
from ._bolt3 import AsyncBolt3
from ._bolt4 import (
AsyncBolt4x1,
AsyncBolt4x2,
AsyncBolt4x3,
AsyncBolt4x4,
)
from ._bolt5 import AsyncBolt5x0
handlers = {
AsyncBolt3.PROTOCOL_VERSION: AsyncBolt3,
# 4.0 unsupported because no space left in the handshake
AsyncBolt4x1.PROTOCOL_VERSION: AsyncBolt4x1,
AsyncBolt4x2.PROTOCOL_VERSION: AsyncBolt4x2,
AsyncBolt4x3.PROTOCOL_VERSION: AsyncBolt4x3,
AsyncBolt4x4.PROTOCOL_VERSION: AsyncBolt4x4,
AsyncBolt5x0.PROTOCOL_VERSION: AsyncBolt5x0,
}
if protocol_version is None:
return handlers
if not isinstance(protocol_version, tuple):
raise TypeError("Protocol version must be specified as a tuple")
if protocol_version in handlers:
return {protocol_version: handlers[protocol_version]}
return {}
@classmethod
def version_list(cls, versions, limit=4):
""" Return a list of supported protocol versions in order of
preference. The number of protocol versions (or ranges)
returned is limited to four.
"""
		# In fact, 4.3 is the first version to support ranges. However, the
# range support got backported to 4.2. But even if the server is too
# old to have the backport, negotiating BOLT 4.1 is no problem as it's
# equivalent to 4.2
first_with_range_support = Version(4, 2)
result = []
for version in versions:
if (result
and version >= first_with_range_support
and result[-1][0] == version[0]
and result[-1][1][1] == version[1] + 1):
# can use range to encompass this version
result[-1][1][1] = version[1]
continue
result.append(Version(version[0], [version[1], version[1]]))
if len(result) == 4:
break
return result
@classmethod
def get_handshake(cls):
""" Return the supported Bolt versions as bytes.
The length is 16 bytes as specified in the Bolt version negotiation.
:return: bytes
"""
supported_versions = sorted(cls.protocol_handlers().keys(), reverse=True)
offered_versions = cls.version_list(supported_versions)
return b"".join(version.to_bytes() for version in offered_versions).ljust(16, b"\x00")
@classmethod
async def ping(cls, address, *, timeout=None, **config):
""" Attempt to establish a Bolt connection, returning the
agreed Bolt protocol version if successful.
"""
config = PoolConfig.consume(config)
try:
s, protocol_version, handshake, data = \
await AsyncBoltSocket.connect(
address,
timeout=timeout,
custom_resolver=config.resolver,
ssl_context=config.get_ssl_context(),
keep_alive=config.keep_alive,
)
except (ServiceUnavailable, SessionExpired, BoltHandshakeError):
return None
else:
AsyncBoltSocket.close_socket(s)
return protocol_version
@classmethod
async def open(
cls, address, *, auth=None, timeout=None, routing_context=None,
**pool_config
):
"""Open a new Bolt connection to a given server address.
:param address:
:param auth:
:param timeout: the connection timeout in seconds
:param routing_context: dict containing routing context
:param pool_config:
:return: connected AsyncBolt instance
:raise BoltHandshakeError:
raised if the Bolt Protocol can not negotiate a protocol version.
:raise ServiceUnavailable: raised if there was a connection issue.
"""
t0 = perf_counter()
pool_config = PoolConfig.consume(pool_config)
socket_connection_timeout = pool_config.connection_timeout
if socket_connection_timeout is None:
socket_connection_timeout = time_remaining()
elif timeout is not None:
socket_connection_timeout = min(pool_config.connection_timeout,
time_remaining())
s, pool_config.protocol_version, handshake, data = \
await AsyncBoltSocket.connect(
address,
timeout=socket_connection_timeout,
custom_resolver=pool_config.resolver,
ssl_context=pool_config.get_ssl_context(),
keep_alive=pool_config.keep_alive,
)
# Carry out Bolt subclass imports locally to avoid circular dependency
# issues.
if pool_config.protocol_version == (3, 0):
from ._bolt3 import AsyncBolt3
bolt_cls = AsyncBolt3
# Implementation for 4.0 exists, but there was no space left in the
# handshake to offer this version to the server. Hence, the server
# should never request us to speak bolt 4.0.
# elif pool_config.protocol_version == (4, 0):
# from ._bolt4 import AsyncBolt4x0
# bolt_cls = AsyncBolt4x0
elif pool_config.protocol_version == (4, 1):
from ._bolt4 import AsyncBolt4x1
bolt_cls = AsyncBolt4x1
elif pool_config.protocol_version == (4, 2):
from ._bolt4 import AsyncBolt4x2
bolt_cls = AsyncBolt4x2
elif pool_config.protocol_version == (4, 3):
from ._bolt4 import AsyncBolt4x3
bolt_cls = AsyncBolt4x3
elif pool_config.protocol_version == (4, 4):
from ._bolt4 import AsyncBolt4x4
bolt_cls = AsyncBolt4x4
elif pool_config.protocol_version == (5, 0):
from ._bolt5 import AsyncBolt5x0
bolt_cls = AsyncBolt5x0
else:
log.debug("[#%04X] S: <CLOSE>", s.getsockname()[1])
AsyncBoltSocket.close_socket(s)
supported_versions = cls.protocol_handlers().keys()
raise BoltHandshakeError(
"The Neo4J server does not support communication with this "
"driver. This driver has support for Bolt protocols "
"{}".format(tuple(map(str, supported_versions))),
address=address, request_data=handshake, response_data=data
)
connection = bolt_cls(
address, s, pool_config.max_connection_lifetime, auth=auth,
user_agent=pool_config.user_agent, routing_context=routing_context
)
try:
connection.socket.set_deadline(time_remaining())
try:
await connection.hello()
except SocketDeadlineExceeded as e:
# connection._defunct = True
raise ServiceUnavailable(
"Timeout during initial handshake occurred"
) from e
finally:
connection.socket.set_deadline(None)
except Exception:
await connection.close_non_blocking()
raise
return connection
@property
@abc.abstractmethod
@property
@abc.abstractmethod
@property
@abc.abstractmethod
@abc.abstractmethod
async def hello(self):
""" Appends a HELLO message to the outgoing queue, sends it and consumes
all remaining messages.
"""
pass
@abc.abstractmethod
async def route(self, database=None, imp_user=None, bookmarks=None):
""" Fetch a routing table from the server for the given
`database`. For Bolt 4.3 and above, this appends a ROUTE
message; for earlier versions, a procedure call is made via
the regular Cypher execution mechanism. In all cases, this is
sent to the network, and a response is fetched.
:param database: database for which to fetch a routing table
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+.
:param bookmarks: iterable of bookmark values after which this
transaction should begin
:return: dictionary of raw routing data
"""
pass
@abc.abstractmethod
def run(self, query, parameters=None, mode=None, bookmarks=None,
metadata=None, timeout=None, db=None, imp_user=None, **handlers):
""" Appends a RUN message to the output queue.
:param query: Cypher query string
:param parameters: dictionary of Cypher parameters
:param mode: access mode for routing - "READ" or "WRITE" (default)
:param bookmarks: iterable of bookmark values after which this transaction should begin
:param metadata: custom metadata dictionary to attach to the transaction
:param timeout: timeout for transaction execution (seconds)
:param db: name of the database against which to begin the transaction
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+.
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def discard(self, n=-1, qid=-1, **handlers):
""" Appends a DISCARD message to the output queue.
:param n: number of records to discard, default = -1 (ALL)
:param qid: query ID to discard for, default = -1 (last query)
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def pull(self, n=-1, qid=-1, **handlers):
""" Appends a PULL message to the output queue.
:param n: number of records to pull, default = -1 (ALL)
:param qid: query ID to pull for, default = -1 (last query)
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
db=None, imp_user=None, **handlers):
""" Appends a BEGIN message to the output queue.
:param mode: access mode for routing - "READ" or "WRITE" (default)
:param bookmarks: iterable of bookmark values after which this transaction should begin
:param metadata: custom metadata dictionary to attach to the transaction
:param timeout: timeout for transaction execution (seconds)
:param db: name of the database against which to begin the transaction
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def commit(self, **handlers):
""" Appends a COMMIT message to the output queue."""
pass
@abc.abstractmethod
def rollback(self, **handlers):
""" Appends a ROLLBACK message to the output queue."""
pass
@abc.abstractmethod
async def reset(self):
""" Appends a RESET message to the outgoing queue, sends it and consumes
all remaining messages.
"""
pass
@abc.abstractmethod
def goodbye(self):
"""Append a GOODBYE message to the outgoing queued."""
pass
def _append(self, signature, fields=(), response=None):
""" Appends a message to the outgoing queue.
:param signature: the signature of the message
:param fields: the fields of the message as a tuple
:param response: a response object to handle callbacks
"""
with self.outbox.tmp_buffer():
self.packer.pack_struct(signature, fields)
self.outbox.wrap_message()
self.responses.append(response)
async def send_all(self):
""" Send all queued messages to the server.
"""
if self.closed():
raise ServiceUnavailable(
"Failed to write to closed connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
if self.defunct():
raise ServiceUnavailable(
"Failed to write to defunct connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
await self._send_all()
@abc.abstractmethod
async def _process_message(self, details, summary_signature,
summary_metadata):
""" Receive at most one message from the server, if available.
:return: 2-tuple of number of detail messages and number of summary
messages fetched
"""
pass
async def fetch_all(self):
""" Fetch all outstanding messages.
:return: 2-tuple of number of detail messages and number of summary
messages fetched
"""
detail_count = summary_count = 0
while self.responses:
response = self.responses[0]
while not response.complete:
detail_delta, summary_delta = await self.fetch_message()
detail_count += detail_delta
summary_count += summary_delta
return detail_count, summary_count
_stale = False
async def close(self):
"""Close the connection."""
if self._closed or self._closing:
return
self._closing = True
if not self._defunct:
self.goodbye()
try:
await self._send_all()
except (OSError, BoltError, DriverError):
pass
log.debug("[#%04X] C: <CLOSE>", self.local_port)
try:
self.socket.close()
except OSError:
pass
finally:
self._closed = True
async def close_non_blocking(self):
"""Set the socket to non-blocking and close it.
This will try to send the `GOODBYE` message (given the socket is not
marked as defunct). However, should the write operation require
blocking (e.g., a full network buffer), then the socket will be closed
immediately (without `GOODBYE` message).
"""
if self._closed or self._closing:
return
self.socket.settimeout(0)
await self.close()
def is_idle_for(self, timeout):
"""Check if connection has been idle for at least the given timeout.
:param timeout: timeout in seconds
:type timeout: float
:rtype: bool
"""
return perf_counter() - self.idle_since > timeout
AsyncBoltSocket.Bolt = AsyncBolt
| 35.548711 | 95 | 0.614476 | # Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import asyncio
from collections import deque
from logging import getLogger
from time import perf_counter
from ..._async_compat.network import AsyncBoltSocket
from ..._async_compat.util import AsyncUtil
from ..._exceptions import (
BoltError,
BoltHandshakeError,
SocketDeadlineExceeded,
)
from ...addressing import Address
from ...api import (
ServerInfo,
Version,
)
from ...conf import PoolConfig
from ...exceptions import (
AuthError,
DriverError,
IncompleteCommit,
ServiceUnavailable,
SessionExpired,
)
from ...meta import get_user_agent
from ...packstream import (
Packer,
Unpacker,
)
from ._common import (
AsyncInbox,
CommitResponse,
Outbox,
)
# Set up logger
log = getLogger("neo4j")
class AsyncBolt:
""" Server connection for Bolt protocol.
	A :class:`.Bolt` should be constructed following a successful
	.open() call and takes the socket over which the Bolt
	handshake was carried out.
"""
MAGIC_PREAMBLE = b"\x60\x60\xB0\x17"
PROTOCOL_VERSION = None
# flag if connection needs RESET to go back to READY state
is_reset = False
	# flag if the connection is currently in use
in_use = False
# When the connection was last put back into the pool
idle_since = float("-inf")
	# close-state flags for the connection
_closing = False
_closed = False
	# flag if the connection is defunct (broken beyond recovery)
_defunct = False
#: The pool of which this connection is a member
pool = None
	# Store the id of the most recently run query to be able to reduce sent bits by
# using the default (-1) to refer to the most recent query when pulling
# results for it.
most_recent_qid = None
def __init__(self, unresolved_address, sock, max_connection_lifetime, *,
auth=None, user_agent=None, routing_context=None):
self.unresolved_address = unresolved_address
self.socket = sock
self.server_info = ServerInfo(Address(sock.getpeername()),
self.PROTOCOL_VERSION)
# so far `connection.recv_timeout_seconds` is the only available
# configuration hint that exists. Therefore, all hints can be stored at
# connection level. This might change in the future.
self.configuration_hints = {}
self.outbox = Outbox()
self.inbox = AsyncInbox(self.socket, on_error=self._set_defunct_read)
self.packer = Packer(self.outbox)
self.unpacker = Unpacker(self.inbox)
self.responses = deque()
self._max_connection_lifetime = max_connection_lifetime
self._creation_timestamp = perf_counter()
self.routing_context = routing_context
self.idle_since = perf_counter()
# Determine the user agent
if user_agent:
self.user_agent = user_agent
else:
self.user_agent = get_user_agent()
# Determine auth details
if not auth:
self.auth_dict = {}
elif isinstance(auth, tuple) and 2 <= len(auth) <= 3:
from neo4j import Auth
self.auth_dict = vars(Auth("basic", *auth))
else:
try:
self.auth_dict = vars(auth)
except (KeyError, TypeError):
raise AuthError("Cannot determine auth details from %r" % auth)
# Check for missing password
try:
credentials = self.auth_dict["credentials"]
except KeyError:
pass
else:
if credentials is None:
raise AuthError("Password cannot be None")
def __del__(self):
if not asyncio.iscoroutinefunction(self.close):
self.close()
@property
@abc.abstractmethod
def supports_multiple_results(self):
""" Boolean flag to indicate if the connection version supports multiple
queries to be buffered on the server side (True) or if all results need
to be eagerly pulled before sending the next RUN (False).
"""
pass
@property
@abc.abstractmethod
def supports_multiple_databases(self):
""" Boolean flag to indicate if the connection version supports multiple
databases.
"""
pass
@classmethod
def protocol_handlers(cls, protocol_version=None):
""" Return a dictionary of available Bolt protocol handlers,
keyed by version tuple. If an explicit protocol version is
provided, the dictionary will contain either zero or one items,
depending on whether that version is supported. If no protocol
version is provided, all available versions will be returned.
:param protocol_version: tuple identifying a specific protocol
version (e.g. (3, 5)) or None
:return: dictionary of version tuple to handler class for all
relevant and supported protocol versions
:raise TypeError: if protocol version is not passed in a tuple
"""
# Carry out Bolt subclass imports locally to avoid circular dependency issues.
from ._bolt3 import AsyncBolt3
from ._bolt4 import (
AsyncBolt4x1,
AsyncBolt4x2,
AsyncBolt4x3,
AsyncBolt4x4,
)
from ._bolt5 import AsyncBolt5x0
handlers = {
AsyncBolt3.PROTOCOL_VERSION: AsyncBolt3,
# 4.0 unsupported because no space left in the handshake
AsyncBolt4x1.PROTOCOL_VERSION: AsyncBolt4x1,
AsyncBolt4x2.PROTOCOL_VERSION: AsyncBolt4x2,
AsyncBolt4x3.PROTOCOL_VERSION: AsyncBolt4x3,
AsyncBolt4x4.PROTOCOL_VERSION: AsyncBolt4x4,
AsyncBolt5x0.PROTOCOL_VERSION: AsyncBolt5x0,
}
if protocol_version is None:
return handlers
if not isinstance(protocol_version, tuple):
raise TypeError("Protocol version must be specified as a tuple")
if protocol_version in handlers:
return {protocol_version: handlers[protocol_version]}
return {}
@classmethod
def version_list(cls, versions, limit=4):
""" Return a list of supported protocol versions in order of
preference. The number of protocol versions (or ranges)
returned is limited to four.
"""
		# In fact, 4.3 is the first version to support ranges. However, the
# range support got backported to 4.2. But even if the server is too
# old to have the backport, negotiating BOLT 4.1 is no problem as it's
# equivalent to 4.2
first_with_range_support = Version(4, 2)
result = []
for version in versions:
if (result
and version >= first_with_range_support
and result[-1][0] == version[0]
and result[-1][1][1] == version[1] + 1):
# can use range to encompass this version
result[-1][1][1] = version[1]
continue
result.append(Version(version[0], [version[1], version[1]]))
if len(result) == 4:
break
return result
@classmethod
def get_handshake(cls):
""" Return the supported Bolt versions as bytes.
The length is 16 bytes as specified in the Bolt version negotiation.
:return: bytes
"""
supported_versions = sorted(cls.protocol_handlers().keys(), reverse=True)
offered_versions = cls.version_list(supported_versions)
return b"".join(version.to_bytes() for version in offered_versions).ljust(16, b"\x00")
@classmethod
async def ping(cls, address, *, timeout=None, **config):
""" Attempt to establish a Bolt connection, returning the
agreed Bolt protocol version if successful.
"""
config = PoolConfig.consume(config)
try:
s, protocol_version, handshake, data = \
await AsyncBoltSocket.connect(
address,
timeout=timeout,
custom_resolver=config.resolver,
ssl_context=config.get_ssl_context(),
keep_alive=config.keep_alive,
)
except (ServiceUnavailable, SessionExpired, BoltHandshakeError):
return None
else:
AsyncBoltSocket.close_socket(s)
return protocol_version
@classmethod
async def open(
cls, address, *, auth=None, timeout=None, routing_context=None,
**pool_config
):
"""Open a new Bolt connection to a given server address.
:param address:
:param auth:
:param timeout: the connection timeout in seconds
:param routing_context: dict containing routing context
:param pool_config:
:return: connected AsyncBolt instance
:raise BoltHandshakeError:
raised if the Bolt Protocol can not negotiate a protocol version.
:raise ServiceUnavailable: raised if there was a connection issue.
"""
def time_remaining():
if timeout is None:
return None
t = timeout - (perf_counter() - t0)
return t if t > 0 else 0
t0 = perf_counter()
pool_config = PoolConfig.consume(pool_config)
socket_connection_timeout = pool_config.connection_timeout
if socket_connection_timeout is None:
socket_connection_timeout = time_remaining()
elif timeout is not None:
socket_connection_timeout = min(pool_config.connection_timeout,
time_remaining())
s, pool_config.protocol_version, handshake, data = \
await AsyncBoltSocket.connect(
address,
timeout=socket_connection_timeout,
custom_resolver=pool_config.resolver,
ssl_context=pool_config.get_ssl_context(),
keep_alive=pool_config.keep_alive,
)
# Carry out Bolt subclass imports locally to avoid circular dependency
# issues.
if pool_config.protocol_version == (3, 0):
from ._bolt3 import AsyncBolt3
bolt_cls = AsyncBolt3
# Implementation for 4.0 exists, but there was no space left in the
# handshake to offer this version to the server. Hence, the server
# should never request us to speak bolt 4.0.
# elif pool_config.protocol_version == (4, 0):
# from ._bolt4 import AsyncBolt4x0
# bolt_cls = AsyncBolt4x0
elif pool_config.protocol_version == (4, 1):
from ._bolt4 import AsyncBolt4x1
bolt_cls = AsyncBolt4x1
elif pool_config.protocol_version == (4, 2):
from ._bolt4 import AsyncBolt4x2
bolt_cls = AsyncBolt4x2
elif pool_config.protocol_version == (4, 3):
from ._bolt4 import AsyncBolt4x3
bolt_cls = AsyncBolt4x3
elif pool_config.protocol_version == (4, 4):
from ._bolt4 import AsyncBolt4x4
bolt_cls = AsyncBolt4x4
elif pool_config.protocol_version == (5, 0):
from ._bolt5 import AsyncBolt5x0
bolt_cls = AsyncBolt5x0
else:
log.debug("[#%04X] S: <CLOSE>", s.getsockname()[1])
AsyncBoltSocket.close_socket(s)
supported_versions = cls.protocol_handlers().keys()
raise BoltHandshakeError(
"The Neo4J server does not support communication with this "
"driver. This driver has support for Bolt protocols "
"{}".format(tuple(map(str, supported_versions))),
address=address, request_data=handshake, response_data=data
)
connection = bolt_cls(
address, s, pool_config.max_connection_lifetime, auth=auth,
user_agent=pool_config.user_agent, routing_context=routing_context
)
try:
connection.socket.set_deadline(time_remaining())
try:
await connection.hello()
except SocketDeadlineExceeded as e:
# connection._defunct = True
raise ServiceUnavailable(
"Timeout during initial handshake occurred"
) from e
finally:
connection.socket.set_deadline(None)
except Exception:
await connection.close_non_blocking()
raise
return connection
@property
@abc.abstractmethod
def encrypted(self):
pass
@property
@abc.abstractmethod
def der_encoded_server_certificate(self):
pass
@property
@abc.abstractmethod
def local_port(self):
pass
@abc.abstractmethod
async def hello(self):
""" Appends a HELLO message to the outgoing queue, sends it and consumes
all remaining messages.
"""
pass
@abc.abstractmethod
async def route(self, database=None, imp_user=None, bookmarks=None):
""" Fetch a routing table from the server for the given
`database`. For Bolt 4.3 and above, this appends a ROUTE
message; for earlier versions, a procedure call is made via
the regular Cypher execution mechanism. In all cases, this is
sent to the network, and a response is fetched.
:param database: database for which to fetch a routing table
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+.
:param bookmarks: iterable of bookmark values after which this
transaction should begin
:return: dictionary of raw routing data
"""
pass
@abc.abstractmethod
def run(self, query, parameters=None, mode=None, bookmarks=None,
metadata=None, timeout=None, db=None, imp_user=None, **handlers):
""" Appends a RUN message to the output queue.
:param query: Cypher query string
:param parameters: dictionary of Cypher parameters
:param mode: access mode for routing - "READ" or "WRITE" (default)
:param bookmarks: iterable of bookmark values after which this transaction should begin
:param metadata: custom metadata dictionary to attach to the transaction
:param timeout: timeout for transaction execution (seconds)
:param db: name of the database against which to begin the transaction
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+.
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def discard(self, n=-1, qid=-1, **handlers):
""" Appends a DISCARD message to the output queue.
:param n: number of records to discard, default = -1 (ALL)
:param qid: query ID to discard for, default = -1 (last query)
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def pull(self, n=-1, qid=-1, **handlers):
""" Appends a PULL message to the output queue.
:param n: number of records to pull, default = -1 (ALL)
:param qid: query ID to pull for, default = -1 (last query)
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
db=None, imp_user=None, **handlers):
""" Appends a BEGIN message to the output queue.
:param mode: access mode for routing - "READ" or "WRITE" (default)
:param bookmarks: iterable of bookmark values after which this transaction should begin
:param metadata: custom metadata dictionary to attach to the transaction
:param timeout: timeout for transaction execution (seconds)
:param db: name of the database against which to begin the transaction
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def commit(self, **handlers):
""" Appends a COMMIT message to the output queue."""
pass
@abc.abstractmethod
def rollback(self, **handlers):
""" Appends a ROLLBACK message to the output queue."""
pass
@abc.abstractmethod
async def reset(self):
""" Appends a RESET message to the outgoing queue, sends it and consumes
all remaining messages.
"""
pass
@abc.abstractmethod
def goodbye(self):
"""Append a GOODBYE message to the outgoing queued."""
pass
def _append(self, signature, fields=(), response=None):
""" Appends a message to the outgoing queue.
:param signature: the signature of the message
:param fields: the fields of the message as a tuple
:param response: a response object to handle callbacks
"""
with self.outbox.tmp_buffer():
self.packer.pack_struct(signature, fields)
self.outbox.wrap_message()
self.responses.append(response)
async def _send_all(self):
data = self.outbox.view()
if data:
try:
await self.socket.sendall(data)
except OSError as error:
await self._set_defunct_write(error)
self.outbox.clear()
self.idle_since = perf_counter()
async def send_all(self):
""" Send all queued messages to the server.
"""
if self.closed():
raise ServiceUnavailable(
"Failed to write to closed connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
if self.defunct():
raise ServiceUnavailable(
"Failed to write to defunct connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
await self._send_all()
@abc.abstractmethod
async def _process_message(self, details, summary_signature,
summary_metadata):
""" Receive at most one message from the server, if available.
:return: 2-tuple of number of detail messages and number of summary
messages fetched
"""
pass
async def fetch_message(self):
if self._closed:
raise ServiceUnavailable(
"Failed to read from closed connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
if self._defunct:
raise ServiceUnavailable(
"Failed to read from defunct connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
if not self.responses:
return 0, 0
# Receive exactly one message
details, summary_signature, summary_metadata = \
await AsyncUtil.next(self.inbox)
res = await self._process_message(
details, summary_signature, summary_metadata
)
self.idle_since = perf_counter()
return res
async def fetch_all(self):
""" Fetch all outstanding messages.
:return: 2-tuple of number of detail messages and number of summary
messages fetched
"""
detail_count = summary_count = 0
while self.responses:
response = self.responses[0]
while not response.complete:
detail_delta, summary_delta = await self.fetch_message()
detail_count += detail_delta
summary_count += summary_delta
return detail_count, summary_count
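    # Typical drain cycle built from send_all()/fetch_all() (sketch; the
    # queuing calls named here are hypothetical for this excerpt):
    #   conn.run(query); conn.pull()
    #   await conn.send_all()
    #   await conn.fetch_all()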
async def _set_defunct_read(self, error=None, silent=False):
message = "Failed to read from defunct connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
await self._set_defunct(message, error=error, silent=silent)
async def _set_defunct_write(self, error=None, silent=False):
message = "Failed to write data to connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
await self._set_defunct(message, error=error, silent=silent)
async def _set_defunct(self, message, error=None, silent=False):
from ._pool import AsyncBoltPool
direct_driver = isinstance(self.pool, AsyncBoltPool)
if error:
log.debug("[#%04X] %s", self.socket.getsockname()[1], error)
log.error(message)
# We were attempting to receive data but the connection
# has unexpectedly terminated. So, we need to close the
# connection from the client side, and remove the address
# from the connection pool.
self._defunct = True
if not self._closing:
# If we fail while closing the connection, there is no need to
# remove the connection from the pool, nor to try to close the
# connection again.
await self.close()
if self.pool:
await self.pool.deactivate(address=self.unresolved_address)
# Iterate through the outstanding responses, and if any correspond
# to COMMIT requests then raise an error to signal that we are
# unable to confirm that the COMMIT completed successfully.
if silent:
return
for response in self.responses:
if isinstance(response, CommitResponse):
if error:
raise IncompleteCommit(message) from error
else:
raise IncompleteCommit(message)
if direct_driver:
if error:
raise ServiceUnavailable(message) from error
else:
raise ServiceUnavailable(message)
else:
if error:
raise SessionExpired(message) from error
else:
raise SessionExpired(message)
def stale(self):
return (self._stale
or (0 <= self._max_connection_lifetime
<= perf_counter() - self._creation_timestamp))
_stale = False
def set_stale(self):
self._stale = True
async def close(self):
"""Close the connection."""
if self._closed or self._closing:
return
self._closing = True
if not self._defunct:
self.goodbye()
try:
await self._send_all()
except (OSError, BoltError, DriverError):
pass
log.debug("[#%04X] C: <CLOSE>", self.local_port)
try:
self.socket.close()
except OSError:
pass
finally:
self._closed = True
async def close_non_blocking(self):
"""Set the socket to non-blocking and close it.
This will try to send the `GOODBYE` message (given the socket is not
marked as defunct). However, should the write operation require
blocking (e.g., a full network buffer), then the socket will be closed
immediately (without `GOODBYE` message).
"""
if self._closed or self._closing:
return
self.socket.settimeout(0)
await self.close()
def closed(self):
return self._closed
def defunct(self):
return self._defunct
def is_idle_for(self, timeout):
"""Check if connection has been idle for at least the given timeout.
:param timeout: timeout in seconds
:type timeout: float
:rtype: bool
"""
return perf_counter() - self.idle_since > timeout
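    # Hedged example of the intended pool-side use of this predicate
    # (`liveness_timeout` is an assumed configuration value):
    #   if connection.is_idle_for(liveness_timeout):
    #       await connection.reset()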
AsyncBoltSocket.Bolt = AsyncBolt
| 5,741 | 0 | 405 |
9578398d67c4ab380e45b5e1357b9a225ddd1afc | 1,633 | py | Python | examples/ex09/process.py | oditorium/PageBuilder | 74fa95285d41ed390f46f22129a45900c1d8b474 | [
"MIT"
] | null | null | null | examples/ex09/process.py | oditorium/PageBuilder | 74fa95285d41ed390f46f22129a45900c1d8b474 | [
"MIT"
] | null | null | null | examples/ex09/process.py | oditorium/PageBuilder | 74fa95285d41ed390f46f22129a45900c1d8b474 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
process data generated by PageBuilder
"""
import json
INFN = "document.json"
OUTFN = "_DATA.json"
print ("PROCESSING ===========================================================")
########################################################################
## READING THE INPUT FILE
print ("reading", INFN)
with open(INFN, "r") as f: document_json = f.read()
print ("parsing", INFN)
document_data = json.loads(document_json)
print ("analysing {} ({} records)".format(INFN, len(document_data)))
data = []
FIELDS = ["_filename", "data"]
for r in document_data:
data.append({ k: r.get(k, None) for k in FIELDS})
print ("extracted {} records".format(len(data)))
print ("EXTRACTED DATA:", data)
########################################################################
## PROCESSING
out_sums = {}
for i in range(len(data)):
d = data[i]
sdata = d['data'].split(",")
sdata = map(int, sdata)
out_sums[d["_filename"]] = {"sum": sum(sdata)}
########################################################################
## WRITING THE OUTPUT FILE
out = {
"_select": out_sums,
# the key `_select` is special; it MUST contain a dict where the
    # dict keys are the filenames (from the `_filename` field); when
    # a specific file `filename` is processed, the content of
    # out["_select"][filename] (which must be a dict) is added to
    # the environment, and can then be used in the template
#"sums": 1,
}
with open(OUTFN, "w") as f: f.write(json.dumps(out))
print("OUT:", out)
print ("END PROCESSING =======================================================")
 | 27.677966 | 80 | 0.509492 |
| 0 | 0 | 0 |
18e2ff9e48d9884824271b259f5494d590944f7d | 48 | py | Python | cyder/api/v1/endpoints/dhcp/vrf/__init__.py | drkitty/cyder | 1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8 | [
"BSD-3-Clause"
] | 6 | 2015-04-16T23:18:22.000Z | 2020-08-25T22:50:13.000Z | cyder/api/v1/endpoints/dhcp/vrf/__init__.py | drkitty/cyder | 1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8 | [
"BSD-3-Clause"
] | 267 | 2015-01-01T00:18:57.000Z | 2015-10-14T00:01:13.000Z | cyder/api/v1/endpoints/dhcp/vrf/__init__.py | drkitty/cyder | 1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8 | [
"BSD-3-Clause"
] | 5 | 2015-03-23T00:57:09.000Z | 2019-09-09T22:42:37.000Z | from cyder.api.v1.endpoints.dhcp.vrf import api
 | 24 | 47 | 0.8125 |
| 0 | 0 | 0 |
8a8cfc04c5c7ae8b231f967292f73edd9f04f568 | 141 | py | Python | oogli/Texture.py | brianbruggeman/oogli | 6a6f681468d609035924ede27d895afcc9d432b6 | [
"Apache-2.0"
] | 3 | 2016-01-18T22:10:51.000Z | 2016-06-10T16:02:55.000Z | oogli/Texture.py | brianbruggeman/oogli | 6a6f681468d609035924ede27d895afcc9d432b6 | [
"Apache-2.0"
] | null | null | null | oogli/Texture.py | brianbruggeman/oogli | 6a6f681468d609035924ede27d895afcc9d432b6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
| 15.666667 | 38 | 0.560284 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Texture(object):
def __init__(self, *args, **kwds):
'''TODO: something'''
| 0 | 71 | 23 |
a2787fee555cd02e69dfc120dfda3be28257df6b | 2,548 | py | Python | pyramids/rules/token_set.py | leomauro/pyramids | 4f7a8e97e13a5ee0b037dc528e5ba72f31ac36e5 | [
"MIT"
] | 9 | 2015-09-04T22:33:40.000Z | 2019-04-11T14:05:11.000Z | pyramids/rules/token_set.py | leomauro/pyramids | 4f7a8e97e13a5ee0b037dc528e5ba72f31ac36e5 | [
"MIT"
] | 2 | 2015-09-04T22:31:44.000Z | 2017-07-29T04:11:53.000Z | pyramids/rules/token_set.py | hosford42/pyramids | 4f7a8e97e13a5ee0b037dc528e5ba72f31ac36e5 | [
"MIT"
] | 3 | 2015-10-14T12:41:26.000Z | 2022-01-08T19:43:47.000Z | import os
from sys import intern
from typing import Iterable, FrozenSet, Optional
from pyramids.categorization import Category
from pyramids.rules.leaf import LeafRule
from pyramids.word_sets import WordSetUtils
| 37.470588 | 100 | 0.642072 | import os
from sys import intern
from typing import Iterable, FrozenSet, Optional
from pyramids.categorization import Category
from pyramids.rules.leaf import LeafRule
from pyramids.word_sets import WordSetUtils
class SetRule(LeafRule):
@classmethod
def from_word_set(cls, file_path: str, verbose: bool = False) -> 'SetRule':
"""Load a word set and return it as a set rule."""
from pyramids.grammar import GrammarSyntaxError, GrammarParser
folder, filename = os.path.split(file_path)
category_definition = os.path.splitext(filename)[0]
try:
category = GrammarParser.parse_category(category_definition)
except GrammarSyntaxError as error:
raise IOError("Badly named word set file: " + file_path) from error
if verbose:
print("Loading category", str(category), "from", file_path, "...")
return SetRule(category, WordSetUtils.load_word_set(file_path), _word_set_path=file_path)
def __init__(self, category: Category, tokens: Iterable[str], *, _word_set_path: str = None):
super().__init__(category)
self._tokens = frozenset(intern(token.lower()) for token in tokens)
self._hash = hash(self._category) ^ hash(self._tokens)
self._word_set_path = _word_set_path
def __hash__(self) -> int:
return self._hash
def __eq__(self, other: 'SetRule') -> bool:
if not isinstance(other, SetRule):
return NotImplemented
return self is other or (self._hash == other._hash and self._category == other._category and
self._tokens == other._tokens)
def __ne__(self, other: 'SetRule') -> bool:
if not isinstance(other, SetRule):
return NotImplemented
return not (self == other)
def __contains__(self, token: str) -> bool:
return token.lower() in self._tokens
def __repr__(self) -> str:
if self._word_set_path is None:
if len(self.tokens) > 10:
return '<%s: %s>' % (type(self).__name__, str(self))
else:
return type(self).__name__ + repr((self.category, sorted(self.tokens)))
else:
return '%s.from_word_set(%r)' % (type(self).__name__, self.word_set_path,)
def __str__(self) -> str:
return str(self.category) + '.ctg'
@property
def tokens(self) -> FrozenSet[str]:
return self._tokens
@property
def word_set_path(self) -> Optional[str]:
return self._word_set_path
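    # Illustrative usage; the directory layout and file name are assumptions:
    #   rule = SetRule.from_word_set("word_sets/noun.ctg")
    #   "Dog" in rule   # membership checks are case-insensitive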
| 1,290 | 1,021 | 23 |
fc7058a10d7e658bef7595f63f5638b9966e1a4c | 6,236 | py | Python | neutron/common/config.py | plumgrid/plumgrid-quantum | dbd7e472ca28d22d694eeeba47e0738985583961 | [
"Apache-2.0"
] | 1 | 2016-04-23T21:33:31.000Z | 2016-04-23T21:33:31.000Z | neutron/common/config.py | plumgrid/plumgrid-quantum | dbd7e472ca28d22d694eeeba47e0738985583961 | [
"Apache-2.0"
] | null | null | null | neutron/common/config.py | plumgrid/plumgrid-quantum | dbd7e472ca28d22d694eeeba47e0738985583961 | [
"Apache-2.0"
] | 4 | 2015-04-14T10:06:51.000Z | 2019-10-02T01:28:34.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import os
from oslo.config import cfg
from paste import deploy
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron.openstack.common.db.sqlalchemy import session as db_session
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.version import version_info as neutron_version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.IntOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.StrOpt('policy_file', default="policy.json",
help=_("The policy file to use")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs")),
cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Neutron will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('max_fixed_ips_per_port', default=5,
help=_("Maximum number of fixed ips per port")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
deprecated_name='dhcp_lease_time',
help=_("DHCP lease duration")),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron")),
cfg.StrOpt('host', default=utils.get_hostname(),
help=_("The hostname Neutron is running on")),
cfg.BoolOpt('force_gateway_on_subnet', default=False,
help=_("Ensure that configured gateway is on subnet")),
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
rpc.set_defaults(control_exchange='neutron')
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_session.set_defaults(sql_connection=_SQL_CONNECTION_DEFAULT,
sqlite_db='', max_pool_size=10,
max_overflow=20, pool_timeout=10)
def setup_logging(conf):
"""Sets up the logging options for a log with supplied name.
    :param conf: a cfg.ConfigOpts object
"""
product_name = "neutron"
logging.setup(product_name)
LOG.info(_("Logging enabled!"))
def load_paste_app(app_name):
"""Builds and returns a WSGI app from a paste config file.
:param app_name: Name of the application to load
:raises ConfigFilesNotFoundError when config file cannot be located
:raises RuntimeError when application cannot be loaded from config file
"""
config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
if not config_path:
raise cfg.ConfigFilesNotFoundError(
config_files=[cfg.CONF.api_paste_config])
config_path = os.path.abspath(config_path)
LOG.info(_("Config paste file: %s"), config_path)
try:
app = deploy.loadapp("config:%s" % config_path, name=app_name)
except (LookupError, ImportError):
msg = (_("Unable to load %(app_name)s from "
"configuration file %(config_path)s.") %
{'app_name': app_name,
'config_path': config_path})
LOG.exception(msg)
raise RuntimeError(msg)
return app
| 39.974359 | 78 | 0.657473 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import os
from oslo.config import cfg
from paste import deploy
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron.openstack.common.db.sqlalchemy import session as db_session
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.version import version_info as neutron_version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.IntOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.StrOpt('policy_file', default="policy.json",
help=_("The policy file to use")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs")),
cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Neutron will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('max_fixed_ips_per_port', default=5,
help=_("Maximum number of fixed ips per port")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
deprecated_name='dhcp_lease_time',
help=_("DHCP lease duration")),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron")),
cfg.StrOpt('host', default=utils.get_hostname(),
help=_("The hostname Neutron is running on")),
cfg.BoolOpt('force_gateway_on_subnet', default=False,
help=_("Ensure that configured gateway is on subnet")),
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
rpc.set_defaults(control_exchange='neutron')
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_session.set_defaults(sql_connection=_SQL_CONNECTION_DEFAULT,
sqlite_db='', max_pool_size=10,
max_overflow=20, pool_timeout=10)
def parse(args):
cfg.CONF(args=args, project='neutron',
version='%%prog %s' % neutron_version.release_string())
# Validate that the base_mac is of the correct format
msg = attributes._validate_regex(cfg.CONF.base_mac,
attributes.MAC_PATTERN)
if msg:
msg = _("Base MAC: %s") % msg
raise Exception(msg)
def setup_logging(conf):
"""Sets up the logging options for a log with supplied name.
    :param conf: a cfg.ConfigOpts object
"""
product_name = "neutron"
logging.setup(product_name)
LOG.info(_("Logging enabled!"))
def load_paste_app(app_name):
"""Builds and returns a WSGI app from a paste config file.
:param app_name: Name of the application to load
:raises ConfigFilesNotFoundError when config file cannot be located
:raises RuntimeError when application cannot be loaded from config file
"""
config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
if not config_path:
raise cfg.ConfigFilesNotFoundError(
config_files=[cfg.CONF.api_paste_config])
config_path = os.path.abspath(config_path)
LOG.info(_("Config paste file: %s"), config_path)
try:
app = deploy.loadapp("config:%s" % config_path, name=app_name)
except (LookupError, ImportError):
msg = (_("Unable to load %(app_name)s from "
"configuration file %(config_path)s.") %
{'app_name': app_name,
'config_path': config_path})
LOG.exception(msg)
raise RuntimeError(msg)
return app
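# Hedged sketch of the usual startup order for the helpers above (argument
# values are placeholders):
#   parse(sys.argv[1:])
#   setup_logging(cfg.CONF)
#   app = load_paste_app('neutron')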
| 362 | 0 | 23 |
b12bdd3b7613ac6f1ca82e2ac22d65ec1929d997 | 3,978 | py | Python | code/tmp_rtrip/test/test_structmembers.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 24 | 2018-01-23T05:28:40.000Z | 2021-04-13T20:52:59.000Z | code/tmp_rtrip/test/test_structmembers.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 17 | 2017-12-21T18:32:31.000Z | 2018-12-18T17:09:50.000Z | code/tmp_rtrip/test/test_structmembers.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | null | null | null | import unittest
from test import support
support.import_module('_testcapi')
from _testcapi import _test_structmembersType, CHAR_MAX, CHAR_MIN, UCHAR_MAX, SHRT_MAX, SHRT_MIN, USHRT_MAX, INT_MAX, INT_MIN, UINT_MAX, LONG_MAX, LONG_MIN, ULONG_MAX, LLONG_MAX, LLONG_MIN, ULLONG_MAX, PY_SSIZE_T_MAX, PY_SSIZE_T_MIN
ts = _test_structmembersType(False, 1, 2, 3, 4, 5, 6, 7, 8, 23, 9.99999,
10.101010101, 'hi')
if __name__ == '__main__':
unittest.main()
| 35.20354 | 232 | 0.65083 | import unittest
from test import support
support.import_module('_testcapi')
from _testcapi import _test_structmembersType, CHAR_MAX, CHAR_MIN, UCHAR_MAX, SHRT_MAX, SHRT_MIN, USHRT_MAX, INT_MAX, INT_MIN, UINT_MAX, LONG_MAX, LONG_MIN, ULONG_MAX, LLONG_MAX, LLONG_MIN, ULLONG_MAX, PY_SSIZE_T_MAX, PY_SSIZE_T_MIN
ts = _test_structmembersType(False, 1, 2, 3, 4, 5, 6, 7, 8, 23, 9.99999,
10.101010101, 'hi')
class ReadWriteTests(unittest.TestCase):
def test_bool(self):
ts.T_BOOL = True
self.assertEqual(ts.T_BOOL, True)
ts.T_BOOL = False
self.assertEqual(ts.T_BOOL, False)
self.assertRaises(TypeError, setattr, ts, 'T_BOOL', 1)
def test_byte(self):
ts.T_BYTE = CHAR_MAX
self.assertEqual(ts.T_BYTE, CHAR_MAX)
ts.T_BYTE = CHAR_MIN
self.assertEqual(ts.T_BYTE, CHAR_MIN)
ts.T_UBYTE = UCHAR_MAX
self.assertEqual(ts.T_UBYTE, UCHAR_MAX)
def test_short(self):
ts.T_SHORT = SHRT_MAX
self.assertEqual(ts.T_SHORT, SHRT_MAX)
ts.T_SHORT = SHRT_MIN
self.assertEqual(ts.T_SHORT, SHRT_MIN)
ts.T_USHORT = USHRT_MAX
self.assertEqual(ts.T_USHORT, USHRT_MAX)
def test_int(self):
ts.T_INT = INT_MAX
self.assertEqual(ts.T_INT, INT_MAX)
ts.T_INT = INT_MIN
self.assertEqual(ts.T_INT, INT_MIN)
ts.T_UINT = UINT_MAX
self.assertEqual(ts.T_UINT, UINT_MAX)
def test_long(self):
ts.T_LONG = LONG_MAX
self.assertEqual(ts.T_LONG, LONG_MAX)
ts.T_LONG = LONG_MIN
self.assertEqual(ts.T_LONG, LONG_MIN)
ts.T_ULONG = ULONG_MAX
self.assertEqual(ts.T_ULONG, ULONG_MAX)
def test_py_ssize_t(self):
ts.T_PYSSIZET = PY_SSIZE_T_MAX
self.assertEqual(ts.T_PYSSIZET, PY_SSIZE_T_MAX)
ts.T_PYSSIZET = PY_SSIZE_T_MIN
self.assertEqual(ts.T_PYSSIZET, PY_SSIZE_T_MIN)
@unittest.skipUnless(hasattr(ts, 'T_LONGLONG'), 'long long not present')
def test_longlong(self):
ts.T_LONGLONG = LLONG_MAX
self.assertEqual(ts.T_LONGLONG, LLONG_MAX)
ts.T_LONGLONG = LLONG_MIN
self.assertEqual(ts.T_LONGLONG, LLONG_MIN)
ts.T_ULONGLONG = ULLONG_MAX
self.assertEqual(ts.T_ULONGLONG, ULLONG_MAX)
ts.T_LONGLONG = 3
self.assertEqual(ts.T_LONGLONG, 3)
ts.T_ULONGLONG = 4
self.assertEqual(ts.T_ULONGLONG, 4)
def test_bad_assignments(self):
integer_attributes = ['T_BOOL', 'T_BYTE', 'T_UBYTE', 'T_SHORT',
'T_USHORT', 'T_INT', 'T_UINT', 'T_LONG', 'T_ULONG', 'T_PYSSIZET']
if hasattr(ts, 'T_LONGLONG'):
integer_attributes.extend(['T_LONGLONG', 'T_ULONGLONG'])
for nonint in (None, 3.2j, 'full of eels', {}, []):
for attr in integer_attributes:
self.assertRaises(TypeError, setattr, ts, attr, nonint)
def test_inplace_string(self):
self.assertEqual(ts.T_STRING_INPLACE, 'hi')
self.assertRaises(TypeError, setattr, ts, 'T_STRING_INPLACE', 's')
self.assertRaises(TypeError, delattr, ts, 'T_STRING_INPLACE')
class TestWarnings(unittest.TestCase):
def test_byte_max(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_BYTE = CHAR_MAX + 1
def test_byte_min(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_BYTE = CHAR_MIN - 1
def test_ubyte_max(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_UBYTE = UCHAR_MAX + 1
def test_short_max(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_SHORT = SHRT_MAX + 1
def test_short_min(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_SHORT = SHRT_MIN - 1
def test_ushort_max(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_USHORT = USHRT_MAX + 1
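    # The pattern above generalizes (sketch only, not an actual test case;
    # that underflow warns the same way is an assumption):
    #   with support.check_warnings(('', RuntimeWarning)):
    #       ts.T_UBYTE = -1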
if __name__ == '__main__':
unittest.main()
| 2,956 | 356 | 208 |
ace8a02a07baa1d3676ee33620ccb26e1bf748c5 | 5,705 | py | Python | src/predict.py | jamesmcclain/algae-model | 45e3e83544034022aba16ad1ed254f1445e4bb1b | [
"MIT"
] | null | null | null | src/predict.py | jamesmcclain/algae-model | 45e3e83544034022aba16ad1ed254f1445e4bb1b | [
"MIT"
] | null | null | null | src/predict.py | jamesmcclain/algae-model | 45e3e83544034022aba16ad1ed254f1445e4bb1b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import copy
import logging
import sys
import warnings
import numpy as np
import rasterio as rio
import torch
import torch.hub
import tqdm
from rasterio.windows import Window
BACKBONES = [
'vgg16', 'densenet161', 'shufflenet_v2_x1_0', 'mobilenet_v2',
'mobilenet_v3_large', 'mobilenet_v3_small', 'resnet18', 'resnet34',
'resnet50', 'resnet101', 'resnet152', 'efficientnet_b0', 'efficientnet_b1',
'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5',
'efficientnet_b6', 'efficientnet_b7', 'fpn_resnet18', 'fpn_resnet34',
'fpn_resnet50'
]
if __name__ == '__main__':
warnings.filterwarnings('ignore')
args = cli_parser().parse_args()
logging.basicConfig(stream=sys.stderr, level=logging.INFO, format='%(asctime)-15s %(message)s')
log = logging.getLogger()
n = args.window_size
device = torch.device(args.device)
model = torch.hub.load('jamesmcclain/algae-classifier:730726f5bccc679fa334da91fe4dc4cb71a35208',
'make_algae_model',
in_channels=[4, 12, 224],
prescale=args.prescale,
backbone_str=args.backbone,
pretrained=False)
model.load_state_dict(torch.load(args.pth_load))
model.to(device)
model.eval()
if args.outfile is None:
model_name = args.pth_load.split('/')[-1].split('.')[0]
args.outfile = [transmute(f) for f in args.infile]
for (infile, outfile) in zip(args.infile, args.outfile):
log.info(outfile)
with rio.open(infile, 'r') as infile_ds, torch.no_grad():
out_raw_profile = copy.deepcopy(infile_ds.profile)
out_raw_profile.update({
'compress': 'lzw',
'dtype': np.float32,
'count': 1,
'bigtiff': 'yes',
'sparse_ok': 'yes',
'tiled': 'yes',
})
width = infile_ds.width
height = infile_ds.height
bandcount = infile_ds.count
ar_out = torch.zeros((1, height, width), dtype=torch.float32).to(device)
pixel_hits = torch.zeros((1, height, width), dtype=torch.uint8).to(device)
if bandcount == 224:
indexes = list(range(1, 224 + 1))
elif bandcount in {12, 13}:
indexes = list(range(1, 12 + 1))
# NOTE: 13 bands does not indicate L1C support, this is
# for Franklin COGs that have an extra band.
bandcount = 12
elif bandcount == 4:
indexes = list(range(1, 4 + 1))
elif bandcount == 5:
indexes = [1, 2, 3, 5]
bandcount = 4
else:
raise Exception(f'bands={bandcount}')
# gather up batches
batches = []
for i in range(0, width - n, args.stride):
for j in range(0, height - n, args.stride):
batches.append((i, j))
batches = [batches[i:i + args.chunksize] for i in range(0, len(batches), args.chunksize)]
for batch in tqdm.tqdm(batches):
windows = [infile_ds.read(indexes, window=Window(i, j, n, n)) for (i, j) in batch]
windows = [w.astype(np.float32) for w in windows]
if args.ndwi_mask:
windows = [w * (((w[2] - w[7]) / (w[2] + w[7])) > 0.0) for w in windows]
try:
windows = np.stack(windows, axis=0)
except:
continue
windows = torch.from_numpy(windows).to(dtype=torch.float32, device=device)
prob = model(windows)
for k, (i, j) in enumerate(batch):
if 'seg' in prob:
_prob = torch.sigmoid(prob.get('seg')[k, 1]) - torch.sigmoid(prob.get('seg')[k, 0])
ar_out[0, j:(j + n), i:(i + n)] += _prob
else:
ar_out[0, j:(j + n), i:(i + n)] += torch.sigmoid(prob.get('class')[k, 0])
pixel_hits[0, j:(j + n), i:(i + n)] += 1
# Bring results back to CPU
ar_out /= pixel_hits
ar_out = ar_out.cpu().numpy()
# Write results to file
with rio.open(outfile, 'w', **out_raw_profile) as outfile_raw_ds:
outfile_raw_ds.write(ar_out[0], indexes=1)
| 39.895105 | 107 | 0.564242 | #!/usr/bin/env python3
import argparse
import copy
import logging
import sys
import warnings
import numpy as np
import rasterio as rio
import torch
import torch.hub
import tqdm
from rasterio.windows import Window
BACKBONES = [
'vgg16', 'densenet161', 'shufflenet_v2_x1_0', 'mobilenet_v2',
'mobilenet_v3_large', 'mobilenet_v3_small', 'resnet18', 'resnet34',
'resnet50', 'resnet101', 'resnet152', 'efficientnet_b0', 'efficientnet_b1',
'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5',
'efficientnet_b6', 'efficientnet_b7', 'fpn_resnet18', 'fpn_resnet34',
'fpn_resnet50'
]
def cli_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--backbone', required=True, type=str, choices=BACKBONES)
parser.add_argument('--chunksize', required=False, type=int, default=256)
parser.add_argument('--device', required=False, type=str, default='cuda', choices=['cuda', 'cpu'])
parser.add_argument('--infile', required=True, type=str, nargs='+')
parser.add_argument('--outfile', required=False, default=None, type=str, nargs='+')
parser.add_argument('--prescale', required=False, type=int, default=1)
parser.add_argument('--pth-load', required=True, type=str)
parser.add_argument('--stride', required=False, type=int, default=13)
parser.add_argument('--window-size', required=False, type=int, default=32)
parser.add_argument('--ndwi-mask', required=False, dest='ndwi_mask', action='store_true')
parser.set_defaults(ndwi_mask=False)
return parser
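# Hedged example invocation; file names and the checkpoint path are
# placeholders:
#   python predict.py --backbone resnet18 --pth-load weights.pth \
#       --infile scene1.tif scene2.tif --ndwi-mask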
if __name__ == '__main__':
warnings.filterwarnings('ignore')
args = cli_parser().parse_args()
logging.basicConfig(stream=sys.stderr, level=logging.INFO, format='%(asctime)-15s %(message)s')
log = logging.getLogger()
n = args.window_size
device = torch.device(args.device)
model = torch.hub.load('jamesmcclain/algae-classifier:730726f5bccc679fa334da91fe4dc4cb71a35208',
'make_algae_model',
in_channels=[4, 12, 224],
prescale=args.prescale,
backbone_str=args.backbone,
pretrained=False)
model.load_state_dict(torch.load(args.pth_load))
model.to(device)
model.eval()
if args.outfile is None:
model_name = args.pth_load.split('/')[-1].split('.')[0]
def transmute(filename):
filename = filename.split('/')[-1]
filename = f"./predict-{model_name}-{filename}"
if not filename.endswith('.tiff'):
filename = filename.replace('.tif', '.tiff')
return filename
args.outfile = [transmute(f) for f in args.infile]
for (infile, outfile) in zip(args.infile, args.outfile):
log.info(outfile)
with rio.open(infile, 'r') as infile_ds, torch.no_grad():
out_raw_profile = copy.deepcopy(infile_ds.profile)
out_raw_profile.update({
'compress': 'lzw',
'dtype': np.float32,
'count': 1,
'bigtiff': 'yes',
'sparse_ok': 'yes',
'tiled': 'yes',
})
width = infile_ds.width
height = infile_ds.height
bandcount = infile_ds.count
ar_out = torch.zeros((1, height, width), dtype=torch.float32).to(device)
pixel_hits = torch.zeros((1, height, width), dtype=torch.uint8).to(device)
if bandcount == 224:
indexes = list(range(1, 224 + 1))
elif bandcount in {12, 13}:
indexes = list(range(1, 12 + 1))
# NOTE: 13 bands does not indicate L1C support, this is
# for Franklin COGs that have an extra band.
bandcount = 12
elif bandcount == 4:
indexes = list(range(1, 4 + 1))
elif bandcount == 5:
indexes = [1, 2, 3, 5]
bandcount = 4
else:
raise Exception(f'bands={bandcount}')
# gather up batches
batches = []
for i in range(0, width - n, args.stride):
for j in range(0, height - n, args.stride):
batches.append((i, j))
batches = [batches[i:i + args.chunksize] for i in range(0, len(batches), args.chunksize)]
for batch in tqdm.tqdm(batches):
windows = [infile_ds.read(indexes, window=Window(i, j, n, n)) for (i, j) in batch]
windows = [w.astype(np.float32) for w in windows]
if args.ndwi_mask:
windows = [w * (((w[2] - w[7]) / (w[2] + w[7])) > 0.0) for w in windows]
try:
windows = np.stack(windows, axis=0)
except:
continue
windows = torch.from_numpy(windows).to(dtype=torch.float32, device=device)
prob = model(windows)
for k, (i, j) in enumerate(batch):
if 'seg' in prob:
_prob = torch.sigmoid(prob.get('seg')[k, 1]) - torch.sigmoid(prob.get('seg')[k, 0])
ar_out[0, j:(j + n), i:(i + n)] += _prob
else:
ar_out[0, j:(j + n), i:(i + n)] += torch.sigmoid(prob.get('class')[k, 0])
pixel_hits[0, j:(j + n), i:(i + n)] += 1
# Bring results back to CPU
ar_out /= pixel_hits
ar_out = ar_out.cpu().numpy()
# Write results to file
with rio.open(outfile, 'w', **out_raw_profile) as outfile_raw_ds:
outfile_raw_ds.write(ar_out[0], indexes=1)
| 1,150 | 0 | 53 |
ed5cc620f755b91673991e6e44482f82fb01cfdf | 669 | py | Python | tz_detect/defaults.py | dkirkham/django-tz-detect | ec3c66a967e2518adf070bfd42a9076471f1bc2a | [
"MIT"
] | null | null | null | tz_detect/defaults.py | dkirkham/django-tz-detect | ec3c66a967e2518adf070bfd42a9076471f1bc2a | [
"MIT"
] | null | null | null | tz_detect/defaults.py | dkirkham/django-tz-detect | ec3c66a967e2518adf070bfd42a9076471f1bc2a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf import settings
# How often to check
TZ_DETECT_PERIOD = getattr(settings, 'TZ_DETECT_PERIOD', 3*3600)
# Version of moment and moment-timezone to load
TZ_DETECT_SCRIPTS = getattr(settings, 'TZ_DETECT_SCRIPTS', [
'<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.24.0/moment.min.js" integrity="sha256-4iQZ6BVL4qNKlQ27TExEhBN1HFPvAvAMbFavKKosSWQ=" crossorigin="anonymous"></script>',
'<script src="https://cdnjs.cloudflare.com/ajax/libs/moment-timezone/0.5.28/moment-timezone-with-data-10-year-range.min.js" integrity="sha256-HS6OzSyhM0rDG0PhZGwf/FvptBzIJnv4MgL2pe87xgg=" crossorigin="anonymous"></script>'
]) | 55.75 | 224 | 0.77429 |
 | 0 | 0 | 0 |
0051e0bd2a9085c3ed7b3685be94b2da7bc22176 | 1,525 | py | Python | cs/algorithms/graph/kargers.py | TylerYep/workshop | 69b19afc81c1b84b7f60723077670fb789b55744 | [
"MIT"
] | 1 | 2021-06-14T01:20:09.000Z | 2021-06-14T01:20:09.000Z | cs/algorithms/graph/kargers.py | TylerYep/workshop | 69b19afc81c1b84b7f60723077670fb789b55744 | [
"MIT"
] | null | null | null | cs/algorithms/graph/kargers.py | TylerYep/workshop | 69b19afc81c1b84b7f60723077670fb789b55744 | [
"MIT"
] | null | null | null | import random
from cs.structures import Edge, Graph, Node, V
def kargers_min_cut(orig_graph: Graph[V]) -> set[Edge[V]]:
"""
Partitions a graph using Karger's Algorithm. Works on directed and undirected
graphs, but involves random choices, so it does not give consistent outputs.
Args:
        orig_graph: A dictionary containing adjacency lists for the graph.
Nodes must be strings.
Returns:
The cutset of the cut found by Karger's Algorithm.
"""
graph: Graph[Node[tuple[V, ...]]] = Graph.from_graph(
orig_graph, node_fn=lambda x: Node((x,))
)
while len(graph) > 2:
edge = random.choice(tuple(graph.edges))
# Contract edge (u, v) to new node uv
uv = Node(edge.start.data + edge.end.data)
uv_neighbors = graph[edge.start] | graph[edge.end]
del uv_neighbors[edge.start]
del uv_neighbors[edge.end]
graph.add_node(uv)
for neighbor in uv_neighbors:
graph.add_edge(uv, neighbor)
if graph.is_directed:
graph.add_edge(neighbor, uv)
# Remove nodes u and v.
graph.remove_node(edge.start)
graph.remove_node(edge.end)
# Find cutset.
group1, group2 = graph.nodes
result_set = set()
for subnode in group1.data:
for subneighbor in group2.data:
if subneighbor in orig_graph[subnode] or subnode in orig_graph[subneighbor]:
result_set.add(orig_graph[subnode][subneighbor])
return result_set
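# Minimal usage sketch; the Graph construction call is an assumption about
# the cs.structures API, and the algorithm is randomized:
#   g = Graph.from_iterable(...)          # hypothetical constructor
#   best = min((kargers_min_cut(g) for _ in range(20)), key=len)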
 | 31.122449 | 88 | 0.633443 |
| 0 | 0 | 0 |
2a8f7460a21b7cad5dc74cfff3405c3af0fe2006 | 471 | py | Python | Python/FindDigits.py | MuriloRoque/coding_challenges | dd1ca31bc1c9e77026ef625fbca7f8938d3e965e | [
"MIT"
] | 7 | 2020-06-03T19:19:07.000Z | 2022-01-08T03:00:59.000Z | Python/FindDigits.py | MuriloRoque/coding-challenges | dd1ca31bc1c9e77026ef625fbca7f8938d3e965e | [
"MIT"
] | 4 | 2020-05-25T10:31:26.000Z | 2022-02-26T08:03:55.000Z | Python/FindDigits.py | MuriloRoque/coding_challenges | dd1ca31bc1c9e77026ef625fbca7f8938d3e965e | [
"MIT"
] | null | null | null | #!/bin/python3
import os
# Complete the findDigits function below.
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
result = findDigits(n)
fptr.write(str(result) + '\n')
fptr.close()
| 15.193548 | 47 | 0.501062 | #!/bin/python3
import os
# Complete the findDigits function below.
def findDigits(n):
s = str(n)
res = 0
for i in s:
if int(i) != 0:
if n % int(i) == 0:
res += 1
return res
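# Worked example: findDigits(124) == 3 because 1, 2 and 4 all divide 124;
# findDigits(1012) == 3 (zero digits are skipped).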
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
result = findDigits(n)
fptr.write(str(result) + '\n')
fptr.close()
| 136 | 0 | 23 |
bc45dbd742de9463f2a3a8dabbfff37df96ff9fa | 3,467 | py | Python | dfirtrack_artifacts/urls.py | thomas-kropeit/dfirtrack | b1e0e659af7bc8085cfe2d269ddc651f9f4ba585 | [
"Apache-2.0"
] | null | null | null | dfirtrack_artifacts/urls.py | thomas-kropeit/dfirtrack | b1e0e659af7bc8085cfe2d269ddc651f9f4ba585 | [
"Apache-2.0"
] | 6 | 2022-03-16T12:30:51.000Z | 2022-03-28T01:34:45.000Z | dfirtrack_artifacts/urls.py | thomas-kropeit/dfirtrack | b1e0e659af7bc8085cfe2d269ddc651f9f4ba585 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from dfirtrack_artifacts.creator import artifact_creator
from dfirtrack_artifacts.exporter.spreadsheet import xls
from dfirtrack_artifacts.views import (
artifact_view,
artifactpriority_view,
artifactstatus_view,
artifacttype_view,
)
urlpatterns = (
# urls for Artifact
path(
r'artifact/',
artifact_view.ArtifactListView.as_view(),
name='artifacts_artifact_list',
),
path(
r'artifact/closed/',
artifact_view.ArtifactClosedView.as_view(),
name='artifacts_artifact_closed',
),
path(
r'artifact/all/',
artifact_view.ArtifactAllView.as_view(),
name='artifacts_artifact_all',
),
path(
r'artifact/create/',
artifact_view.ArtifactCreateView.as_view(),
name='artifacts_artifact_create',
),
path(
r'artifact/detail/<int:pk>/',
artifact_view.ArtifactDetailView.as_view(),
name='artifacts_artifact_detail',
),
path(
r'artifact/update/<int:pk>/',
artifact_view.ArtifactUpdateView.as_view(),
name='artifacts_artifact_update',
),
path(
r'artifact/<int:pk>/set_user/',
artifact_view.ArtifactSetUser.as_view(),
name='artifact_set_user',
),
path(
r'artifact/<int:pk>/unset_user/',
artifact_view.ArtifactUnsetUser.as_view(),
name='artifact_unset_user',
),
path(
r'artifact/creator/', artifact_creator.artifact_creator, name='artifact_creator'
),
path(
r'artifact/exporter/spreadsheet/xls/artifact/',
xls.artifact,
name='artifact_exporter_spreadsheet_xls',
),
path(
r'artifact/exporter/spreadsheet/xls/artifact/cron/',
xls.artifact_create_cron,
name='artifact_exporter_spreadsheet_xls_cron',
),
)
urlpatterns += (
# urls for Artifactpriority
path(
r'artifactpriority/',
artifactpriority_view.ArtifactpriorityListView.as_view(),
name='artifacts_artifactpriority_list',
),
path(
r'artifactpriority/detail/<int:pk>/',
artifactpriority_view.ArtifactpriorityDetailView.as_view(),
name='artifacts_artifactpriority_detail',
),
)
urlpatterns += (
# urls for Artifactstatus
path(
r'artifactstatus/',
artifactstatus_view.ArtifactstatusListView.as_view(),
name='artifacts_artifactstatus_list',
),
path(
r'artifactstatus/detail/<int:pk>/',
artifactstatus_view.ArtifactstatusDetailView.as_view(),
name='artifacts_artifactstatus_detail',
),
)
urlpatterns += (
# urls for Artifacttype
path(
r'artifacttype/',
artifacttype_view.ArtifacttypeListView.as_view(),
name='artifacts_artifacttype_list',
),
path(
r'artifacttype/create/',
artifacttype_view.ArtifacttypeCreateView.as_view(),
name='artifacts_artifacttype_create',
),
path(
r'artifacttype/add_popup/',
artifacttype_view.ArtifacttypeCreatePopup.as_view(),
name='artifacttype_add_popup',
),
path(
r'artifacttype/detail/<int:pk>/',
artifacttype_view.ArtifacttypeDetailView.as_view(),
name='artifacts_artifacttype_detail',
),
path(
r'artifacttype/update/<int:pk>/',
artifacttype_view.ArtifacttypeUpdateView.as_view(),
name='artifacts_artifacttype_update',
),
)
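# Hedged integration sketch; the project-level urls.py shown here is assumed,
# not part of this app:
#   from django.urls import include, path
#   urlpatterns = [path('', include('dfirtrack_artifacts.urls'))]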
 | 27.736 | 88 | 0.651284 |
| 0 | 0 | 0 |
3f7cd28b00b51df823099bd4153d8f5599444380 | 270 | py | Python | mayan/apps/locales/icons.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | [
"Apache-2.0"
] | 343 | 2015-01-05T14:19:35.000Z | 2018-12-10T19:07:48.000Z | mayan/apps/locales/icons.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | [
"Apache-2.0"
] | 191 | 2015-01-03T00:48:19.000Z | 2018-11-30T09:10:25.000Z | mayan/apps/locales/icons.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | [
"Apache-2.0"
] | 114 | 2015-01-08T20:21:05.000Z | 2018-12-10T19:07:53.000Z | from mayan.apps.appearance.classes import Icon
icon_user_locale_profile_detail = Icon(
driver_name='fontawesome', symbol='globe'
)
icon_user_locale_profile_edit = Icon(
driver_name='fontawesome-dual', primary_symbol='globe',
secondary_symbol='pencil-alt'
)
 | 27 | 59 | 0.781481 |
| 0 | 0 | 0 |
6ab583d13ec98e93752ce61e59742114fe5f4689 | 4,643 | py | Python | result/arka/parse_result.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 29 | 2018-09-19T01:16:27.000Z | 2022-03-29T14:35:36.000Z | result/arka/parse_result.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 14 | 2019-04-12T18:37:36.000Z | 2022-02-10T00:27:55.000Z | result/arka/parse_result.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 14 | 2019-03-05T23:44:11.000Z | 2022-03-18T07:29:31.000Z | import os
import sys
import pdb
import re
from copy import deepcopy
from operator import itemgetter
import json
import pandas as pd
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, dir_path + '/../..')
from plastering.metadata_interface import *
from plastering.evaluator import *
target_building = 'sdh'
currfile = __file__
base_dir = os.path.dirname(currfile)
target_dir = base_dir + '/' + target_building
orig_cluster_sizes = {}
total_names = []
for filename in os.listdir(target_dir):
if not re.match('{0}-ORIGINAL-METADATA-\\d+$'.format(target_building.upper()),
filename):
continue
cid = get_number(filename)
with open(target_dir + '/' + filename, 'r') as fp:
names = fp.readlines()
orig_cluster_sizes[cid] = len(names)
total_names += names
total_names = list(set(total_names))
total_srcids = [get_srcid(name) for name in total_names]
curr_cluster_sizes = deepcopy(orig_cluster_sizes)
true_tagsets = {srcid: LabeledMetadata.objects(srcid=srcid).first().tagsets
for srcid in total_srcids}
true_points = {srcid: LabeledMetadata.objects(srcid=srcid).first().point_tagset
for srcid in total_srcids}
qualified_examples_nums = {}
for filename in os.listdir(target_dir):
if not re.match('l-ex-\\d+-out$', filename):
continue
cid = get_number(filename)
df = pd.read_csv(target_dir + '/' + filename)
df.columns = df.columns.str.strip()
coverages = df['Num Examples Thought to be fully qualified'].tolist()
qualified_examples_nums[cid] = coverages
inferred_points_dict = {i: {} for i in curr_cluster_sizes.keys()}
for filename in os.listdir(target_dir):
if not re.match('l-ex-\\d+-out-points-qualified$', filename):
continue
cid = get_number(filename)
with open(target_dir + '/' + filename, 'r') as fp:
lines = fp.readlines()
for line in lines:
ex_id = int(line.split(' ')[0])
if "'" not in line:
items = []
else:
items = line.split('[')[-1].split(']')[0][1:-1].split("', '")
inferred_points_dict[cid][ex_id] = items
pred = {}
curr_eids = {i: 0 for i in curr_cluster_sizes.keys()}
total_num = sum(orig_cluster_sizes.values())
pred_names = set()
cnt = 0
accs = []
f1s = []
mf1s = []
anymf1s = []
srcids = []
pred = {srcid: [] for srcid in total_srcids}
point_pred = {srcid: [] for srcid in total_srcids}
res = []
while not is_finished():
# select cluster
#max_cid = max(curr_cluster_sizes.items(), key=itemgetter(1))[0]
cnt += 1
max_cid = select_next_cid()
curr_eids[max_cid] += 1
curr_eid = curr_eids[max_cid]
found_names = set(inferred_points_dict[max_cid][curr_eid])
new_names = found_names - pred_names
new_srcids = [get_srcid(name) for name in new_names]
pred_names = pred_names.union(new_names)
curr_cluster_sizes[max_cid] = orig_cluster_sizes[max_cid] - len(found_names)
acc = len(pred_names) / total_num
print('{0}\tacc: {1}'.format(cnt, acc))
pred.update({srcid: LabeledMetadata.objects(srcid=srcid).first().tagsets
for srcid in new_srcids})
point_pred.update({
srcid: LabeledMetadata.objects(srcid=srcid).first().point_tagset
for srcid in new_srcids})
anymf1 = get_macro_f1(true_tagsets, pred)
mf1 = get_macro_f1(true_points, point_pred)
f1 = get_micro_f1(true_points, point_pred)
#mf1s.append(mf1)
#f1s.append(f1)
#anymf1s.append(anymf1)
#accs.append(acc)
#srcids.append(len(pred_names))
row = {
'metrics': {
'f1': f1,
'macrof1': mf1,
'accuracy': acc,
'macrof1-all': anymf1
},
'learning_srcids': cnt
}
res.append(row)
with open('result/pointonly_notransfer_arka_{0}_0.json'.format(target_building),
'w') as fp:
json.dump(res, fp)
| 30.748344 | 82 | 0.645272 | import os
import sys
import pdb
import re
from copy import deepcopy
from operator import itemgetter
import json
import pandas as pd
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, dir_path + '/../..')
from plastering.metadata_interface import *
from plastering.evaluator import *
target_building = 'sdh'
currfile = __file__
base_dir = os.path.dirname(currfile)
target_dir = base_dir + '/' + target_building
def get_number(s):
return int(re.findall('\\d+', s)[0])
def is_finished():
for cid, curr_eid in curr_eids.items():
if curr_eid < len(qualified_examples_nums[cid]) - 1:
return False
return True
def select_next_cid():
ordered_cids = [row[0] for row in
sorted(curr_cluster_sizes.items(),
key=itemgetter(1),
reverse=True)]
for cid in ordered_cids:
curr_eid = curr_eids[cid]
if curr_eid < len(qualified_examples_nums[cid]) - 1:
return cid
raise Exception('cannot find cids without finishing the algorithm. A bug')
def get_srcid(name):
return '_'.join(re.findall('[a-zA-Z0-9]+', name))
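# e.g. get_srcid("SODA1R410__ART\n") -> "SODA1R410_ART" (illustrative input)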
orig_cluster_sizes = {}
total_names = []
for filename in os.listdir(target_dir):
if not re.match('{0}-ORIGINAL-METADATA-\\d+$'.format(target_building.upper()),
filename):
continue
cid = get_number(filename)
with open(target_dir + '/' + filename, 'r') as fp:
names = fp.readlines()
orig_cluster_sizes[cid] = len(names)
total_names += names
total_names = list(set(total_names))
total_srcids = [get_srcid(name) for name in total_names]
curr_cluster_sizes = deepcopy(orig_cluster_sizes)
true_tagsets = {srcid: LabeledMetadata.objects(srcid=srcid).first().tagsets
for srcid in total_srcids}
true_points = {srcid: LabeledMetadata.objects(srcid=srcid).first().point_tagset
for srcid in total_srcids}
qualified_examples_nums = {}
for filename in os.listdir(target_dir):
if not re.match('l-ex-\\d+-out$', filename):
continue
cid = get_number(filename)
df = pd.read_csv(target_dir + '/' + filename)
df.columns = df.columns.str.strip()
coverages = df['Num Examples Thought to be fully qualified'].tolist()
qualified_examples_nums[cid] = coverages
inferred_points_dict = {i: {} for i in curr_cluster_sizes.keys()}
for filename in os.listdir(target_dir):
if not re.match('l-ex-\\d+-out-points-qualified$', filename):
continue
cid = get_number(filename)
with open(target_dir + '/' + filename, 'r') as fp:
lines = fp.readlines()
for line in lines:
ex_id = int(line.split(' ')[0])
if "'" not in line:
items = []
else:
items = line.split('[')[-1].split(']')[0][1:-1].split("', '")
inferred_points_dict[cid][ex_id] = items
pred = {}
curr_eids = {i: 0 for i in curr_cluster_sizes.keys()}
total_num = sum(orig_cluster_sizes.values())
pred_names = set()
cnt = 0
accs = []
f1s = []
mf1s = []
anymf1s = []
srcids = []
pred = {srcid: [] for srcid in total_srcids}
point_pred = {srcid: [] for srcid in total_srcids}
res = []
while not is_finished():
# select cluster
#max_cid = max(curr_cluster_sizes.items(), key=itemgetter(1))[0]
cnt += 1
max_cid = select_next_cid()
curr_eids[max_cid] += 1
curr_eid = curr_eids[max_cid]
found_names = set(inferred_points_dict[max_cid][curr_eid])
new_names = found_names - pred_names
new_srcids = [get_srcid(name) for name in new_names]
pred_names = pred_names.union(new_names)
curr_cluster_sizes[max_cid] = orig_cluster_sizes[max_cid] - len(found_names)
acc = len(pred_names) / total_num
print('{0}\tacc: {1}'.format(cnt, acc))
pred.update({srcid: LabeledMetadata.objects(srcid=srcid).first().tagsets
for srcid in new_srcids})
point_pred.update({
srcid: LabeledMetadata.objects(srcid=srcid).first().point_tagset
for srcid in new_srcids})
anymf1 = get_macro_f1(true_tagsets, pred)
mf1 = get_macro_f1(true_points, point_pred)
f1 = get_micro_f1(true_points, point_pred)
#mf1s.append(mf1)
#f1s.append(f1)
#anymf1s.append(anymf1)
#accs.append(acc)
#srcids.append(len(pred_names))
row = {
'metrics': {
'f1': f1,
'macrof1': mf1,
'accuracy': acc,
'macrof1-all': anymf1
},
'learning_srcids': cnt
}
res.append(row)
with open('result/pointonly_notransfer_arka_{0}_0.json'.format(target_building),
'w') as fp:
json.dump(res, fp)
| 642 | 0 | 92 |
580fe86ae0aa9c38a9e6907e1803cb156d5b2bdf | 7,285 | py | Python | learntools/ml_intermediate/ex3.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | null | null | null | learntools/ml_intermediate/ex3.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | null | null | null | learntools/ml_intermediate/ex3.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import warnings
from learntools.core import *
Label = MultipartProblem(LabelA, LabelB)
Cardinality = MultipartProblem(CardinalityA, CardinalityB)
qvars = bind_exercises(globals(), [
Drop,
Label,
Cardinality,
OneHot
],
var_format='step_{n}',
)
__all__ = list(qvars)
| 41.158192 | 104 | 0.68744 | import pandas as pd
import warnings
from learntools.core import *
class Drop(CodingProblem):
_vars = ['drop_X_train', 'drop_X_valid']
_hint = ("Use the [`select_dtypes()`](https://pandas.pydata.org/pandas-"
"docs/stable/reference/api/pandas.DataFrame.select_dtypes.html) method "
"to drop all columns with the `object` dtype.")
_solution = CS(
"""# Drop columns in training and validation data
drop_X_train = X_train.select_dtypes(exclude=['object'])
drop_X_valid = X_valid.select_dtypes(exclude=['object'])
""")
def check(self, drop_X_train, drop_X_valid):
assert type(drop_X_train) == pd.core.frame.DataFrame, \
"`drop_X_train` is not a pandas DataFrame."
assert type(drop_X_valid) == pd.core.frame.DataFrame, \
"`drop_X_valid` is not a pandas DataFrame."
assert not(any((drop_X_train.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your training data."
assert not(any((drop_X_valid.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your validation data."
assert drop_X_train.shape[1] == 33, \
("`drop_X_train` should have 33 columns.")
assert drop_X_valid.shape[1] == 33, \
("`drop_X_valid` should have 33 columns.")
class LabelA(ThoughtExperiment):
_hint = ("Are there any values that appear in the validation data but not in the training data?")
_solution = ("Fitting a label encoder to a column in the training data creates a corresponding "
"integer-valued label for each unique value **that appears in the training data**. In "
"the case that the validation data contains values that don't also appear in the "
"training data, the encoder will throw an error, because these values won't have an "
"integer assigned to them. Notice that the `'Condition2'` "
"column in the validation data contains the values `'RRAn'` and `'RRNn'`, but these "
"don't appear in the training data -- thus, if we try to use a label encoder with "
"scikit-learn, the code will throw an error.")
class LabelB(CodingProblem):
_vars = ['label_X_train', 'label_X_valid']
_hint = ("Use the `LabelEncoder` class from scikit-learn. You should only encode the columns in "
"`good_label_cols`.")
_solution = CS(
"""# Drop categorical columns that will not be encoded
label_X_train = X_train.drop(bad_label_cols, axis=1)
label_X_valid = X_valid.drop(bad_label_cols, axis=1)
# Apply label encoder
label_encoder = LabelEncoder()
for col in set(good_label_cols):
label_X_train[col] = label_encoder.fit_transform(X_train[col])
label_X_valid[col] = label_encoder.transform(X_valid[col])
""")
def check(self, label_X_train, label_X_valid):
assert type(label_X_train) == pd.core.frame.DataFrame, \
"`label_X_train` is not a pandas DataFrame."
assert type(label_X_valid) == pd.core.frame.DataFrame, \
"`label_X_valid` is not a pandas DataFrame."
assert not(any((label_X_train.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your training data."
assert not(any((label_X_valid.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your validation data."
# remove 45 after nb update
assert label_X_train.shape[1] in [57, 45], \
"`label_X_train` does not have the correct number of columns."
# remove 45 after nb update
assert label_X_valid.shape[1] in [57, 45], \
"`label_X_valid` does not have the correct number of columns."
Label = MultipartProblem(LabelA, LabelB)
class CardinalityA(EqualityCheckProblem):
_vars = ['high_cardinality_numcols', 'num_cols_neighborhood']
_expected = [3, 25]
_hint = ("To one-hot encode a variable, we need one column for each unique entry.")
_solution = CS(
"""# How many categorical variables in the training data
# have cardinality greater than 10?
high_cardinality_numcols = 3
# How many columns are needed to one-hot encode the
# 'Neighborhood' variable in the training data?
num_cols_neighborhood = 25
""")
class CardinalityB(EqualityCheckProblem):
_vars = ['OH_entries_added', 'label_entries_added']
_expected = [990000, 0]
_hint = ("To calculate how many entries are added to the dataset through the one-hot encoding, "
"begin by calculating how many entries are needed to encode the categorical variable "
"(by multiplying the number of rows by the number of columns in the one-hot encoding). "
"Then, to obtain how many entries are **added** to the dataset, subtract the number "
"of entries in the original column.")
_solution = CS(
"""# How many entries are added to the dataset by
# replacing the column with a one-hot encoding?
OH_entries_added = 1e4*100 - 1e4
# How many entries are added to the dataset by
# replacing the column with a label encoding?
label_entries_added = 0
""")
Cardinality = MultipartProblem(CardinalityA, CardinalityB)
class OneHot(CodingProblem):
_vars = ['OH_X_train', 'OH_X_valid']
_hint = ("Begin by applying the one-hot encoder to the low cardinality columns in the "
"training and validation data in `X_train[low_cardinality_cols]` and "
"`X_valid[low_cardinality_cols]`, respectively.")
_solution = CS(
"""# Apply one-hot encoder to each column with categorical data
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(X_train[low_cardinality_cols]))
OH_cols_valid = pd.DataFrame(OH_encoder.transform(X_valid[low_cardinality_cols]))
# One-hot encoding removed index; put it back
OH_cols_train.index = X_train.index
OH_cols_valid.index = X_valid.index
# Remove categorical columns (will replace with one-hot encoding)
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
# Add one-hot encoded columns to numerical features
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1)
""")
def check(self, OH_X_train, OH_X_valid):
assert type(OH_X_train) == pd.core.frame.DataFrame, \
"`OH_X_train` is not a pandas DataFrame."
assert type(OH_X_valid) == pd.core.frame.DataFrame, \
"`OH_X_valid` is not a pandas DataFrame."
assert not(any((OH_X_train.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your training data."
assert not(any((OH_X_valid.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your validation data."
assert len(OH_X_train.columns) == 155, \
"`OH_X_train` should have 155 columns."
assert len(OH_X_valid.columns) == 155, \
"`OH_X_valid` should have 155 columns."
qvars = bind_exercises(globals(), [
Drop,
Label,
Cardinality,
OneHot
],
var_format='step_{n}',
)
__all__ = list(qvars)
| 2,451 | 4,378 | 138 |
adcc7eef90b09be43068eff5739a52723c4a565f | 976 | py | Python | src/vnc_me/controllers/connect.py | maizy/vnc-me | 644cbe7c58d5077b2a2c41145e088430c97860ee | [
"MIT"
] | null | null | null | src/vnc_me/controllers/connect.py | maizy/vnc-me | 644cbe7c58d5077b2a2c41145e088430c97860ee | [
"MIT"
] | null | null | null | src/vnc_me/controllers/connect.py | maizy/vnc-me | 644cbe7c58d5077b2a2c41145e088430c97860ee | [
"MIT"
] | null | null | null | # _*_ coding: utf-8 _*_
# Copyright (c) Nikita Kovaliov, maizy.ru, 2013
# See LICENSE.txt for details.
from tornado.web import asynchronous
from vnc_me.controllers import HttpHandler
from vnc_me.vnc_client import VncClient
| 27.885714 | 68 | 0.604508 | # _*_ coding: utf-8 _*_
# Copyright (c) Nikita Kovaliov, maizy.ru, 2013
# See LICENSE.txt for details.
from tornado.web import asynchronous
from vnc_me.controllers import HttpHandler
from vnc_me.vnc_client import VncClient
class Handler(HttpHandler):
@asynchronous
def post(self):
host = self.get_argument('host', None)
port = self.get_argument('port', None)
password = self.get_argument('password', '')
if not host or not port:
self.redirect('/?error=bad_params')
return
client = VncClient.get_client('main')
if client.running:
self.redirect('/?error=ever_running')
return
def _on_connect(success):
if success:
self.redirect('/?connected=true')
else:
self.redirect('/?error=unknown')
client.start(_on_connect, host, port,
password.encode('utf-8') if password else None)
| 676 | 51 | 23 |
5be6b16b88128604801bcc33e64d646652abc4ae | 2,318 | py | Python | tests/test_spacecurve.py | SPOCKnots/pyknotid | 514a3f0f64d980100dc5f1086551f2d809c14907 | [
"MIT"
] | 17 | 2019-02-07T11:39:38.000Z | 2022-03-31T13:14:29.000Z | tests/test_spacecurve.py | SPOCKnots/pyknotid | 514a3f0f64d980100dc5f1086551f2d809c14907 | [
"MIT"
] | 5 | 2017-11-10T15:12:30.000Z | 2021-11-01T16:36:22.000Z | tests/test_spacecurve.py | SPOCKnots/pyknotid | 514a3f0f64d980100dc5f1086551f2d809c14907 | [
"MIT"
] | 7 | 2017-11-10T14:23:46.000Z | 2021-03-28T06:05:04.000Z |
import pyknotid.spacecurves.spacecurve as sp
import pyknotid.make as mk
from functools import wraps
import os
from os import path
import numpy as np
import pytest
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
@pass_trefoil
| 20.156522 | 75 | 0.689819 |
import pyknotid.spacecurves.spacecurve as sp
import pyknotid.make as mk
from functools import wraps
import os
from os import path
import numpy as np
import pytest
def pass_trefoil(func):
    @wraps(func)
    def new_func():
        return func(sp.SpaceCurve(mk.trefoil()))
    return new_func
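# Every decorated test below receives a fresh trefoil SpaceCurve, keeping the tests independent.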
@pass_trefoil
def test_init(k):
pass
@pass_trefoil
def test_copy(k):
k2 = k.copy()
assert np.all(k2.points == k.points)
assert k2.points is not k.points
@pass_trefoil
def test_points(k):
assert isinstance(k.points, np.ndarray)
@pass_trefoil
def test_translate(k):
pos = k.points[0]
k.translate([10., 20., 30.])
new_pos = k.points[0]
assert new_pos[0] - pos[0] == 10.
assert new_pos[1] - pos[1] == 20.
assert new_pos[2] - pos[2] == 30.
@pass_trefoil
def test_zero_centroid(k):
k.points += np.random.random(size=3) * np.random.random() * 10
k.zero_centroid()
assert np.all(np.average(k.points, axis=0) < 0.00001)
@pass_trefoil
def test_rotate(k):
k.rotate()
@pass_trefoil
def test_planar_writhe(k):
assert np.abs(k.planar_writhe()) == 3
@pass_trefoil
def test_writhe(k):
k.rotate()
w = k.writhe(100)
assert -3.6 < w < -3.4
@pass_trefoil
def test_gauss_code(k):
assert str(k.gauss_code(recalculate=True)) == '1+a,2-a,3+a,1-a,2+a,3-a'
@pass_trefoil
def test_reconstructed_space_curve(k):
k2 = k.reconstructed_space_curve()
assert k.planar_writhe() == k2.planar_writhe()
@pass_trefoil
def test_write_load(k):
k.to_json('test_trefoil.json')
k2 = sp.SpaceCurve.from_json('test_trefoil.json')
assert k2.planar_writhe() == k2.planar_writhe()
os.unlink('test_trefoil.json')
@pass_trefoil
def test_octree_simplify(k):
k.octree_simplify(runs=2)
@pass_trefoil
def test_arclength(k):
assert np.isclose(k.arclength(), 31.8512, atol=0.01)
@pass_trefoil
def test_rog(k):
assert np.isclose(k.radius_of_gyration(), 2.244798, atol=0.01)
@pass_trefoil
def test_smooth(k):
k.smooth()
@pass_trefoil
def test_compiled_vs_python_find_crossings(k):
try:
import pyknotid.spacecurves.chelpers
except ImportError:
return # chelpers not installed
g1 = k.gauss_code(recalculate=True, try_cython=True)
g2 = k.gauss_code(recalculate=True, try_cython=False)
assert str(g1) == str(g2)
| 1,530 | 0 | 376 |
3be12f5f6443ad94d4862814b4ddfa13ca970561 | 998 | py | Python | pcloudpy/gui/graphics/QVTKWindow.py | mmolero/pcloudpy | c8e4b342f9180374db97af3d87d60ece683b7bc0 | [
"BSD-3-Clause"
] | 39 | 2015-09-30T18:59:22.000Z | 2020-10-28T01:52:41.000Z | pcloudpy/gui/graphics/QVTKWindow.py | mmolero/pcloudpy | c8e4b342f9180374db97af3d87d60ece683b7bc0 | [
"BSD-3-Clause"
] | 3 | 2017-01-05T20:53:54.000Z | 2017-11-30T06:57:13.000Z | pcloudpy/gui/graphics/QVTKWindow.py | mmolero/pcloudpy | c8e4b342f9180374db97af3d87d60ece683b7bc0 | [
"BSD-3-Clause"
] | 19 | 2017-01-05T20:33:59.000Z | 2021-09-25T09:19:28.000Z | #Author: Miguel Molero <miguel.molero@gmail.com>
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from pcloudpy.gui.graphics.QVTKWidget import QVTKWidget
if __name__ == "__main__":
from vtk import vtkConeSource
from vtk import vtkPolyDataMapper, vtkActor
app = QApplication(['QVTKWindow'])
win = QVTKMainWindow()
cone = vtkConeSource()
cone.SetResolution(8)
coneMapper = vtkPolyDataMapper()
    coneMapper.SetInputConnection(cone.GetOutputPort())  # mapper SetInput() was removed in VTK 6+
coneActor = vtkActor()
coneActor.SetMapper(coneMapper)
win.vtkWidget.renderer.AddActor(coneActor)
# show the widget
win.show()
# start event processing
app.exec_() | 26.972973 | 55 | 0.709419 | #Author: Miguel Molero <miguel.molero@gmail.com>
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from pcloudpy.gui.graphics.QVTKWidget import QVTKWidget
class QVTKMainWindow(QMainWindow):
def __init__(self, parent = None):
super(QVTKMainWindow, self).__init__(parent)
self.vtkWidget = QVTKWidget(self)
self.setCentralWidget(self.vtkWidget)
self.setWindowTitle("QVTKMainWindow")
self.setGeometry(50,50, 800,800)
if __name__ == "__main__":
from vtk import vtkConeSource
from vtk import vtkPolyDataMapper, vtkActor
app = QApplication(['QVTKWindow'])
win = QVTKMainWindow()
cone = vtkConeSource()
cone.SetResolution(8)
coneMapper = vtkPolyDataMapper()
    coneMapper.SetInputConnection(cone.GetOutputPort())  # mapper SetInput() was removed in VTK 6+
coneActor = vtkActor()
coneActor.SetMapper(coneMapper)
win.vtkWidget.renderer.AddActor(coneActor)
# show the widget
win.show()
# start event processing
app.exec_() | 241 | 13 | 49 |
839732c105e90217381325571c730da38f09602e | 3,377 | py | Python | service/docs/source/conf.py | dannosliwcd/geopm | 3ec0d223e700350ff37f6d10adde7b9bfbdba286 | [
"BSD-3-Clause"
] | 2 | 2016-07-23T18:05:45.000Z | 2020-07-24T17:55:24.000Z | service/docs/source/conf.py | dannosliwcd/geopm | 3ec0d223e700350ff37f6d10adde7b9bfbdba286 | [
"BSD-3-Clause"
] | null | null | null | service/docs/source/conf.py | dannosliwcd/geopm | 3ec0d223e700350ff37f6d10adde7b9bfbdba286 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'GEOPM Service'
copyright = '2021, Intel (R) Corporation'
author = 'Intel (R) Corporation'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.napoleon',
'sphinx_rtd_theme',
]
napoleon_google_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_logo = 'https://geopm.github.io/images/geopm-logo-clear.png'
logo_only = True
| 39.267442 | 79 | 0.70151 | # Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'GEOPM Service'
copyright = '2021, Intel (R) Corporation'
author = 'Intel (R) Corporation'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.napoleon',
'sphinx_rtd_theme',
]
napoleon_google_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_logo = 'https://geopm.github.io/images/geopm-logo-clear.png'
logo_only = True
| 0 | 0 | 0 |
4d834666adfb7a39b9f9187dae673a2b0a66c916 | 980 | py | Python | jdxapi/utils/logger_resource.py | jobdataexchange/jdx-api | 7815a6463de56423c3b4196648607c4ebe56828c | [
"Apache-2.0"
] | null | null | null | jdxapi/utils/logger_resource.py | jobdataexchange/jdx-api | 7815a6463de56423c3b4196648607c4ebe56828c | [
"Apache-2.0"
] | 9 | 2019-12-26T17:39:58.000Z | 2022-01-13T01:59:49.000Z | jdxapi/utils/logger_resource.py | jobdataexchange/jdx-api | 7815a6463de56423c3b4196648607c4ebe56828c | [
"Apache-2.0"
] | null | null | null | import functools
import logging
from flask_restful import Resource
from flask import request
import json
| 28.823529 | 108 | 0.67551 | import functools
import logging
from flask_restful import Resource
from flask import request
import json
def failsafe_pp_json_obj(json_obj):
try:
return json.dumps(json_obj, indent=4)
    except Exception:  # fall back to the raw object if it is not JSON-serializable
return json_obj
def log_input_output(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
logger = logging.getLogger('inputoutput')
# print(dir(request.))
user_metadata = f'IP: {request.remote_addr} - User Agent: {request.user_agent} - URL: {request.url}'
logger.info(f'{user_metadata} - Request Body: {failsafe_pp_json_obj(request.get_json())}')
# logger.info(failsafe_pp_json_obj(request.get_json()))
output = func(*args, **kwargs)
logger.info(f'{user_metadata} - Response Body: {failsafe_pp_json_obj(output.get_json())}')
return output
# restful.abort(401)
return wrapper
class LoggerResource(Resource):
method_decorators = [log_input_output]
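# Subclasses of LoggerResource get request/response logging on every HTTP verb handler.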
| 748 | 53 | 72 |
7f2a71b1e414d3825cb38086a03f7cd11fd7e6ea | 259 | py | Python | spatialpooch/__init__.py | achapkowski/spatial-pooch | e2525678d1b5f6acadfb53de43d8a10cf30d6ec9 | [
"Apache-2.0"
] | 1 | 2020-04-02T16:44:03.000Z | 2020-04-02T16:44:03.000Z | spatialpooch/__init__.py | achapkowski/spatial-pooch | e2525678d1b5f6acadfb53de43d8a10cf30d6ec9 | [
"Apache-2.0"
] | null | null | null | spatialpooch/__init__.py | achapkowski/spatial-pooch | e2525678d1b5f6acadfb53de43d8a10cf30d6ec9 | [
"Apache-2.0"
] | null | null | null | from ._tabular import fetch_crime_data, fetch_traffic_data
from ._vector import (fetch_beach_access_data,
fetch_crime_shp_data,
fetch_family_resource_centers_data,
fetch_shipping_lanes_data) | 51.8 | 58 | 0.660232 | from ._tabular import fetch_crime_data, fetch_traffic_data
from ._vector import (fetch_beach_access_data,
fetch_crime_shp_data,
fetch_family_resource_centers_data,
fetch_shipping_lanes_data) | 0 | 0 | 0 |
7641bdb92e4dd1311b939ff97255a4f2bfe9d25c | 1,295 | py | Python | predict.py | don6105/OCR-Captcha-Recognition | f9d3088b4937218e2675ad19832cd6cdf333d683 | [
"Apache-2.0"
] | null | null | null | predict.py | don6105/OCR-Captcha-Recognition | f9d3088b4937218e2675ad19832cd6cdf333d683 | [
"Apache-2.0"
] | null | null | null | predict.py | don6105/OCR-Captcha-Recognition | f9d3088b4937218e2675ad19832cd6cdf333d683 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import cv2
import numpy as np
import os
import process_img
import pickle
from shutil import copyfile
# model = cv2.ml.KNearest_create()
# model.load('model.xml')
model = cv2.ml.KNearest_load('model.xml')
img_area = 40 * 40
# Load the serialized id-to-label map into memory
f = open('id_label_map.txt', 'rb')
try:
id_label_map = pickle.load(f)
except EOFError:
pass
f.close()
filenames = os.listdir('img')
for filename in filenames:
filelist = [ f for f in os.listdir('predict')]
for f in filelist:
os.remove(os.path.join('predict', f))
copyfile(os.path.join('img', filename), os.path.join('predict', filename))
img_captcha = cv2.imread(os.path.join('predict', filename))
process_img.run('predict', 'result', 1)
predict = sorted(os.listdir('result'))
r = []
for p in predict:
img = cv2.imread(os.path.join('result', p), cv2.IMREAD_GRAYSCALE)
sample = img.reshape((1, img_area)).astype(np.float32)
ret, results, neighbours, distances = model.findNearest(sample, k = 3)
label_id = int(results[0, 0])
label = id_label_map[label_id]
r.append(label)
print(' '.join(r))
cv2.imshow('image', img_captcha)
key = cv2.waitKey(0)
if key == 27:
exit()
else :
cv2.destroyAllWindows() | 25.9 | 78 | 0.643243 | #!/usr/bin/python3
import cv2
import numpy as np
import os
import process_img
import pickle
from shutil import copyfile
# model = cv2.ml.KNearest_create()
# model.load('model.xml')
model = cv2.ml.KNearest_load('model.xml')
img_area = 40 * 40
# Load the serialized id-to-label map into memory
f = open('id_label_map.txt', 'rb')
try:
id_label_map = pickle.load(f)
except EOFError:
pass
f.close()
filenames = os.listdir('img')
for filename in filenames:
filelist = [ f for f in os.listdir('predict')]
for f in filelist:
os.remove(os.path.join('predict', f))
copyfile(os.path.join('img', filename), os.path.join('predict', filename))
img_captcha = cv2.imread(os.path.join('predict', filename))
process_img.run('predict', 'result', 1)
predict = sorted(os.listdir('result'))
r = []
for p in predict:
img = cv2.imread(os.path.join('result', p), cv2.IMREAD_GRAYSCALE)
sample = img.reshape((1, img_area)).astype(np.float32)
ret, results, neighbours, distances = model.findNearest(sample, k = 3)
label_id = int(results[0, 0])
label = id_label_map[label_id]
r.append(label)
print(' '.join(r))
cv2.imshow('image', img_captcha)
key = cv2.waitKey(0)
if key == 27:
exit()
else :
cv2.destroyAllWindows() | 0 | 0 | 0 |
bd4cecd93bd9c57a578f054d6684869c8da3f50d | 210 | py | Python | AlgorithmTest/PROGRAMMERS_PYTHON/Lv1/Prog_12934.py | bluesky0960/AlgorithmTest | 35e6c01b1c25bf13d4c034c047f3dd3b67f1578e | [
"MIT"
] | null | null | null | AlgorithmTest/PROGRAMMERS_PYTHON/Lv1/Prog_12934.py | bluesky0960/AlgorithmTest | 35e6c01b1c25bf13d4c034c047f3dd3b67f1578e | [
"MIT"
] | null | null | null | AlgorithmTest/PROGRAMMERS_PYTHON/Lv1/Prog_12934.py | bluesky0960/AlgorithmTest | 35e6c01b1c25bf13d4c034c047f3dd3b67f1578e | [
"MIT"
] | null | null | null | #https://programmers.co.kr/learn/courses/30/lessons/12934 | 23.333333 | 57 | 0.533333 | #https://programmers.co.kr/learn/courses/30/lessons/12934
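# A perfect square n maps to (sqrt(n) + 1) ** 2; any other n maps to -1.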
def solution(n):
answer = 0
if int(n**0.5)**2 == n:
answer = (int(n**0.5)+1)**2
else:
answer = -1
return answer | 131 | 0 | 22 |
be56fe3a0855a11c83234a8075eb53f6c7ee860e | 12,531 | py | Python | main.py | sem-onyalo/knowledge-graph-loader | 7beadc3fe0f159e5386639d8fa9aeccffa23950c | [
"MIT"
] | null | null | null | main.py | sem-onyalo/knowledge-graph-loader | 7beadc3fe0f159e5386639d8fa9aeccffa23950c | [
"MIT"
] | null | null | null | main.py | sem-onyalo/knowledge-graph-loader | 7beadc3fe0f159e5386639d8fa9aeccffa23950c | [
"MIT"
] | null | null | null | import csv
import logging
import neo4j
import os
import uuid
from concurrent.futures import ThreadPoolExecutor
from pyopenie import OpenIE5
from queue import Empty, Queue
from spacy.lang.en import English
from time import sleep
from typing import List
ENCODING = "utf-8"
DATA_DIRECTORY = "./data"
CACHE_DIRECTORY = "cache/"
CACHED_CONNECTIONS_FILE = "entity_connections.cache"
CACHED_FILTERED_CONNECTIONS_FILE = "entity_connections_filtered.cache"
QUEUE_WAIT_TIMEOUT = 5
CONNECTION_BUILDER_THREADS = 5
RELATIONSHIP_EXTRACTION_SERVICE_RETRIES = 5
RELATIONSHIP_EXTRACTION_SERVICE_TIMEOUT = 3
RELATIONSHIP_EXTRACTION_SERVICE_URL = 'http://localhost:8000'
NEO4J_URL = "bolt://localhost:7687"
NEO4J_CREDENTIALS_FILE = ".credentials"
GRAPH_LOADER_THREADS = 1
nlp:English = None
extractor:OpenIE5 = None
sentence_queue:Queue = None
connection_list:List[EntityConnection] = None
query_queue:Queue = None
loader:Loader = None
connection_cache_source:int = 0
if __name__ == "__main__":
main() | 34.905292 | 168 | 0.677201 | import csv
import logging
import neo4j
import os
import uuid
from concurrent.futures import ThreadPoolExecutor
from pyopenie import OpenIE5
from queue import Empty, Queue
from spacy.lang.en import English
from time import sleep
from typing import List
ENCODING = "utf-8"
DATA_DIRECTORY = "./data"
CACHE_DIRECTORY = "cache/"
CACHED_CONNECTIONS_FILE = "entity_connections.cache"
CACHED_FILTERED_CONNECTIONS_FILE = "entity_connections_filtered.cache"
QUEUE_WAIT_TIMEOUT = 5
CONNECTION_BUILDER_THREADS = 5
RELATIONSHIP_EXTRACTION_SERVICE_RETRIES = 5
RELATIONSHIP_EXTRACTION_SERVICE_TIMEOUT = 3
RELATIONSHIP_EXTRACTION_SERVICE_URL = 'http://localhost:8000'
NEO4J_URL = "bolt://localhost:7687"
NEO4J_CREDENTIALS_FILE = ".credentials"
GRAPH_LOADER_THREADS = 1
class Document:
file_name:str
sentences:list
def __init__(self, file_name, sentences) -> None:
self.file_name = file_name
self.sentences = sentences
class DocumentSentence:
document:Document
sentence:str
def __init__(self, document, sentence) -> None:
self.document = document
self.sentence = sentence
class EntityConnection:
from_entity:str
to_entity:str
relationship:str
confidence:float
file_name:str
def __str__(self) -> str:
return f"from_entity={self.from_entity}, to_entity={self.to_entity}, relationship={self.relationship}, confidence={self.confidence}, file_name={self.file_name}"
def __eq__(self, __o: object) -> bool:
if isinstance(__o, self.__class__):
other:EntityConnection = __o
return (self.from_entity == other.from_entity
and self.to_entity == other.to_entity
and self.relationship == other.relationship
and self.confidence == other.confidence
and self.file_name == other.file_name)
else:
return False
class Neo4jAuth:
url:str
username:str
password:str
def __init__(self) -> None:
self.url = NEO4J_URL
with open(NEO4J_CREDENTIALS_FILE, encoding=ENCODING) as fd:
self.username = fd.readline().strip()
self.password = fd.readline().strip()
class Loader:
auth:Neo4jAuth
def __init__(self, auth:Neo4jAuth) -> None:
self.auth = auth
self.driver = neo4j.GraphDatabase.driver(self.auth.url, auth=(self.auth.username, self.auth.password))
def load_queries(self, queries:Queue) -> None:
with ThreadPoolExecutor(max_workers=GRAPH_LOADER_THREADS) as executor:
args = ((uuid.uuid4(), queries) for _ in range(GRAPH_LOADER_THREADS))
futures = executor.map(lambda p: self.load_query(*p), args)
for future in futures:
logging.debug(f"Load query thread result {future}")
def load_query(self, threadId:str, queries:Queue) -> None:
logging.info(f"[{threadId}] Loader thread started")
queries_loaded = 0
while True:
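            # Drain the shared queue; hitting the timeout (Empty) means all queries were consumed.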
try:
query:str = queries.get(timeout=QUEUE_WAIT_TIMEOUT)
except Empty:
logging.info(f"[{threadId}] Loader thread exiting, queue empty, processed {queries_loaded} queries")
return queries_loaded, threadId
with self.driver.session() as session:
session.write_transaction((lambda tx, query: tx.run(query)), query)
queries_loaded += 1
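# Module-level singletons, populated by the init_* helpers before main() runs the pipeline.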
nlp:English = None
extractor:OpenIE5 = None
sentence_queue:Queue = None
connection_list:List[EntityConnection] = None
query_queue:Queue = None
loader:Loader = None
connection_cache_source:int = 0
def init_logger(level=logging.DEBUG):
logging.basicConfig(
format="[%(asctime)s]\t[%(levelname)s]\t[%(name)s]\t%(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=level,
)
def init_cache():
cache_dir = os.path.join(DATA_DIRECTORY, CACHE_DIRECTORY)
if not os.path.isdir(cache_dir):
os.mkdir(cache_dir)
def init_sentencizer() -> None:
global nlp
nlp = English()
nlp.add_pipe("sentencizer")
def init_sentence_queue() -> None:
global sentence_queue
sentence_queue = Queue()
def init_connection_list() -> None:
global connection_list
cache_connections = get_cache_connections()
connection_list = cache_connections if cache_connections != None else list()
def init_query_queue() -> None:
global query_queue
query_queue = Queue()
def init_relationship_extractor() -> None:
global extractor
extractor = OpenIE5(RELATIONSHIP_EXTRACTION_SERVICE_URL)
def init_loader() -> None:
global loader
auth = Neo4jAuth()
loader = Loader(auth)
def cache_data(file:str) -> None:
path = os.path.join(DATA_DIRECTORY, CACHE_DIRECTORY, file)
with open(path, mode="w", encoding=ENCODING) as fd:
writer = csv.writer(fd)
for c in connection_list:
row = [c.from_entity, c.to_entity, c.relationship, c.confidence, c.file_name]
# TODO: fix extra CRLF at end of line
writer.writerow(row)
def cache_connections() -> None:
cache_data(CACHED_CONNECTIONS_FILE)
def cache_filtered_connections() -> None:
cache_data(CACHED_FILTERED_CONNECTIONS_FILE)
def get_cache_connections() -> List[EntityConnection]:
FROM_ENTITY_IDX = 0
TO_ENTITY_IDX = 1
RELATIONSHIP_IDX = 2
CONFIDENCE_IDX = 3
FILE_NAME_IDX = 4
path = os.path.join(DATA_DIRECTORY, CACHE_DIRECTORY, CACHED_CONNECTIONS_FILE)
if os.path.isfile(path):
connections = list()
with open(path, mode="r", encoding=ENCODING) as fd:
reader = csv.reader(fd)
for row in reader:
if len(row) == 0:
continue
connection = EntityConnection()
connection.from_entity = row[FROM_ENTITY_IDX]
connection.to_entity = row[TO_ENTITY_IDX]
connection.relationship = row[RELATIONSHIP_IDX]
connection.confidence = float(row[CONFIDENCE_IDX])
connection.file_name = row[FILE_NAME_IDX]
connections.append(connection)
return connections
def extract_sentences_from_data(data) -> list:
document = nlp(data)
return [s.text for s in document.sents]
def extract_data_from_file(file_path) -> str:
with open(file_path, encoding=ENCODING) as fd:
data = fd.read()
return data
def build_documents_from_files(data_files) -> List[Document]:
documents = list()
for data_file in data_files:
data = extract_data_from_file(data_file)
sentences = extract_sentences_from_data(data)
documents.append(Document(data_file, sentences))
return documents
def build_connection_from_extraction(extraction:dict, document:Document) -> None:
if len(extraction["extraction"]["arg2s"]) > 0:
connection = EntityConnection()
connection.from_entity = extraction["extraction"]["arg1"]["text"]
# TODO: add logic for handling multiple arg2s
connection.to_entity = extraction["extraction"]["arg2s"][0]["text"]
connection.relationship = extraction["extraction"]["rel"]["text"]
connection.confidence = float(extraction["confidence"])
connection.file_name = os.path.basename(document.file_name.replace("\\", os.sep))
connection_list.append(connection)
def build_connections_from_document(threadId:str) -> None:
logging.info(f"[{threadId}] Connection builder thread started")
sentences_processed = 0
while True:
try:
docSentence:DocumentSentence = sentence_queue.get(timeout=QUEUE_WAIT_TIMEOUT)
except Empty:
logging.info(f"[{threadId}] Connection builder thread exiting, queue empty, processed {sentences_processed} sentences")
return sentences_processed, threadId
got_extractions = False
current_try = RELATIONSHIP_EXTRACTION_SERVICE_RETRIES
while current_try > 0:
try:
extractions = extractor.extract(docSentence.sentence)
got_extractions = True
sentences_processed += 1
break
except Exception as e:
logging.debug(f"[{threadId}] Connection builder thread service exception on try {current_try}: {e}")
sleep(RELATIONSHIP_EXTRACTION_SERVICE_TIMEOUT)
current_try -= 1
if not got_extractions:
logging.error(f"[{threadId}] Connection builder thread skipping item, could not process sentence: {docSentence.sentence}")
continue
for extraction in extractions:
build_connection_from_extraction(extraction, docSentence.document)
def build_connections_from_documents(documents:List[Document]) -> List[EntityConnection]:
if len(connection_list) > 0:
logging.info("Skipping build connections, list populated by cache")
return
sentences_count = 0
for document in documents:
for sentence in document.sentences:
sentence_queue.put(DocumentSentence(document, sentence))
sentences_count += 1
sentences_processed = 0
with ThreadPoolExecutor(max_workers=CONNECTION_BUILDER_THREADS) as executor:
threadIds = [uuid.uuid4() for _ in range(CONNECTION_BUILDER_THREADS)]
futures = executor.map(build_connections_from_document, threadIds)
for future in futures:
logging.debug(f"Thread result {future}")
sentences_processed += int(future[0])
logging.info(f"{sentences_processed} of {sentences_count} sentences processed")
cache_connections()
def filter_connections_stop_words(connections:List[EntityConnection]):
i = 0
items_removed = 0
current_length = len(connections)
while i < current_length:
connection = connections[i]
if connection.from_entity.lower() in nlp.Defaults.stop_words or connection.to_entity.lower() in nlp.Defaults.stop_words:
logging.debug(f"removing connection for stop word: {connection}")
connections.remove(connection)
items_removed += 1
current_length -= 1
else:
i += 1
logging.info(f"{items_removed} entity connections removed because of stop words")
def filter_connections_dups(connections:List[EntityConnection]):
i = 0
items_removed = 0
no_dup_list = list()
current_length = len(connections)
while i < current_length:
connection = connections[i]
if connection in no_dup_list:
logging.debug(f"removing connection for duplicate: {connection}")
connections.remove(connection)
items_removed += 1
current_length -= 1
else:
i += 1
no_dup_list.append(connection)
logging.info(f"{items_removed} entity connections removed because of duplicates")
def filter_connections(connections:List[EntityConnection]):
length_before = len(connections)
filter_connections_dups(connections)
filter_connections_stop_words(connections)
length_after = len(connections)
logging.info(f"New length after filters: {length_after}, {length_before - length_after} items removed")
cache_filtered_connections()
def build_queries_from_connections(connections:List[EntityConnection], queries:Queue) -> None:
for connection in connections:
from_entity = connection.from_entity.replace('"', '\\"')
to_entity = connection.to_entity.replace('"', '\\"')
relationship = connection.relationship.replace('"', '\\"')
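        # MERGE (rather than CREATE) keeps the load idempotent: re-running the
        # pipeline will not duplicate Entity nodes or RELATION edges.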
query = ""
query += f'MERGE (f:Entity {{ name: "{from_entity}" }}) '
query += f'MERGE (t:Entity {{ name: "{to_entity}" }}) '
query += f'MERGE (f)-[:RELATION {{ name: "{relationship}", confidence: {connection.confidence} }}]->(t);'
queries.put(query)
logging.debug(f"Built query {query}")
def main():
init_logger()
init_cache()
init_sentencizer()
init_sentence_queue()
init_connection_list()
init_query_queue()
init_relationship_extractor()
init_loader()
data_files = [os.path.join(DATA_DIRECTORY, f) for f in os.listdir(DATA_DIRECTORY) if os.path.isfile(os.path.join(DATA_DIRECTORY, f))]
documents = build_documents_from_files(data_files)
build_connections_from_documents(documents)
filter_connections(connection_list)
build_queries_from_connections(connection_list, query_queue)
loader.load_queries(query_queue)
if __name__ == "__main__":
main() | 10,461 | 437 | 644 |
9c2ab7ec270dc8209d5b75adadfdc279f77d4441 | 270 | py | Python | polls/scraping_db/wadi_fashion.py | young-ha713/TeamProject | f98bbfbb7cab1b292f83f48a926dc6fd8b3eaf84 | [
"Apache-2.0"
] | null | null | null | polls/scraping_db/wadi_fashion.py | young-ha713/TeamProject | f98bbfbb7cab1b292f83f48a926dc6fd8b3eaf84 | [
"Apache-2.0"
] | null | null | null | polls/scraping_db/wadi_fashion.py | young-ha713/TeamProject | f98bbfbb7cab1b292f83f48a926dc6fd8b3eaf84 | [
"Apache-2.0"
] | 2 | 2021-08-12T01:51:32.000Z | 2021-08-17T05:16:37.000Z | import pandas as pd
df = pd.read_excel('C:/Users/gkdud/PycharmProjects/TeamProject/Scraping/files/fashion_scraping.xlsx')
import sqlite3
connect = sqlite3.connect('./wadizdb.sqlite3')
df.to_sql('table_fashion', connect, if_exists='append', index=False)
connect.close() | 33.75 | 101 | 0.788889 | import pandas as pd
df = pd.read_excel('C:/Users/gkdud/PycharmProjects/TeamProject/Scraping/files/fashion_scraping.xlsx')
import sqlite3
connect = sqlite3.connect('./wadizdb.sqlite3')
df.to_sql('table_fashion', connect, if_exists='append', index=False)
connect.close() | 0 | 0 | 0 |
15506c9d3b917a6a1bc46dffb7f880578de51951 | 5,177 | py | Python | static_compress/mixin.py | RentFreeMedia/django-static-compress | b56940b9246714401bdd0b24c2f9595419dc6671 | [
"MIT"
] | 8 | 2017-10-23T07:32:43.000Z | 2019-12-16T16:25:02.000Z | static_compress/mixin.py | RentFreeMedia/django-static-compress | b56940b9246714401bdd0b24c2f9595419dc6671 | [
"MIT"
] | 90 | 2018-06-02T07:37:29.000Z | 2022-03-31T13:01:24.000Z | static_compress/mixin.py | RentFreeMedia/django-static-compress | b56940b9246714401bdd0b24c2f9595419dc6671 | [
"MIT"
] | 8 | 2018-07-25T13:56:40.000Z | 2022-02-11T17:18:17.000Z | import os
from os.path import getatime, getctime, getmtime
import errno
from django.core.exceptions import ImproperlyConfigured
from . import compressors
__all__ = ["CompressMixin"]
DEFAULT_METHODS = ["gz", "br"]
METHOD_MAPPING = {
"gz": compressors.ZopfliCompressor,
"br": compressors.BrotliCompressor,
"gz+zlib": compressors.ZlibCompressor,
# gz+zlib and gz cannot be used at the same time, because they produce the same file extension.
}
| 40.131783 | 114 | 0.618119 | import os
from os.path import getatime, getctime, getmtime
import errno
from django.core.exceptions import ImproperlyConfigured
from . import compressors
__all__ = ["CompressMixin"]
DEFAULT_METHODS = ["gz", "br"]
METHOD_MAPPING = {
"gz": compressors.ZopfliCompressor,
"br": compressors.BrotliCompressor,
"gz+zlib": compressors.ZlibCompressor,
# gz+zlib and gz cannot be used at the same time, because they produce the same file extension.
}
class CompressMixin:
allowed_extensions = []
compress_methods = []
keep_original = True
compressors = []
minimum_kb = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# We access Django settings lately here, to allow our app to be imported without
# defining DJANGO_SETTINGS_MODULE.
from django.conf import settings
self.allowed_extensions = getattr(settings, "STATIC_COMPRESS_FILE_EXTS", ["js", "css", "svg"])
self.compress_methods = getattr(settings, "STATIC_COMPRESS_METHODS", DEFAULT_METHODS)
self.keep_original = getattr(settings, "STATIC_COMPRESS_KEEP_ORIGINAL", True)
self.minimum_kb = getattr(settings, "STATIC_COMPRESS_MIN_SIZE_KB", 30)
valid = [i for i in self.compress_methods if i in METHOD_MAPPING]
if not valid:
raise ImproperlyConfigured("No valid method is defined in STATIC_COMPRESS_METHODS setting.")
if "gz" in valid and "gz+zlib" in valid:
raise ImproperlyConfigured("STATIC_COMPRESS_METHODS: gz and gz+zlib cannot be used at the same time.")
self.compressors = [METHOD_MAPPING[k]() for k in valid]
def get_alternate_compressed_path(self, name):
for compressor in self.compressors:
ext = compressor.extension
if name.endswith(".{}".format(ext)):
path = self.path(name)
else:
path = self.path("{}.{}".format(name, ext))
if os.path.exists(path):
return path
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)
def get_accessed_time(self, name):
if self.keep_original:
return super().get_accessed_time(name)
return self._datetime_from_timestamp(getatime(self.get_alternate_compressed_path(name)))
def get_created_time(self, name):
if self.keep_original:
return super().get_created_time(name)
return self._datetime_from_timestamp(getctime(self.get_alternate_compressed_path(name)))
def get_modified_time(self, name):
if self.keep_original:
return super().get_modified_time(name)
alt = self.get_alternate_compressed_path(name)
return self._datetime_from_timestamp(getmtime(alt))
def post_process(self, paths, dry_run=False, **options):
if hasattr(super(), "post_process"):
yield from super().post_process(paths, dry_run, **options)
if dry_run:
return
for name in paths.keys():
if not self._is_file_allowed(name):
continue
source_storage, path = paths[name]
# Process if file is big enough
if os.path.getsize(self.path(path)) < self.minimum_kb * 1024:
continue
src_mtime = source_storage.get_modified_time(path)
dest_path = self._get_dest_path(path)
with self._open(dest_path) as file:
for compressor in self.compressors:
dest_compressor_path = "{}.{}".format(dest_path, compressor.extension)
# Check if the original file has been changed.
# If not, no need to compress again.
full_compressed_path = self.path(dest_compressor_path)
try:
dest_mtime = self._datetime_from_timestamp(getmtime(full_compressed_path))
file_is_unmodified = dest_mtime.replace(microsecond=0) >= src_mtime.replace(microsecond=0)
except FileNotFoundError:
file_is_unmodified = False
if file_is_unmodified:
continue
# Delete old gzip file, or Nginx will pick the old file to serve.
# Note: Django won't overwrite the file, so we have to delete it ourselves.
if self.exists(dest_compressor_path):
self.delete(dest_compressor_path)
out = compressor.compress(path, file)
if out:
self._save(dest_compressor_path, out)
if not self.keep_original:
self.delete(name)
yield dest_path, dest_compressor_path, True
file.seek(0)
def _get_dest_path(self, path):
if hasattr(self, "hashed_name"):
return self.hashed_name(path)
return path
def _is_file_allowed(self, file):
for extension in self.allowed_extensions:
if file.endswith("." + extension):
return True
return False
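# One possible use (an assumption, not part of this module): mix into a Django
# storage backend, e.g.
#   class CompressedStaticFilesStorage(CompressMixin, StaticFilesStorage):
#       pass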
| 4,357 | 334 | 23 |
d136bbf6cfe49c89ba2ae55848ff79c2754f45bf | 1,433 | py | Python | cvat/apps/engine/ddln/tasks/vls/persistence/csv.py | daedaleanai/cvat | d0df08c3f66a39324bd0b82683ee4cef05ed9c53 | [
"MIT"
] | 1 | 2021-07-12T20:34:31.000Z | 2021-07-12T20:34:31.000Z | cvat/apps/engine/ddln/tasks/vls/persistence/csv.py | daedaleanai/cvat | d0df08c3f66a39324bd0b82683ee4cef05ed9c53 | [
"MIT"
] | 8 | 2020-05-04T09:44:13.000Z | 2021-10-14T12:54:40.000Z | cvat/apps/engine/ddln/tasks/vls/persistence/csv.py | daedaleanai/cvat | d0df08c3f66a39324bd0b82683ee4cef05ed9c53 | [
"MIT"
] | 1 | 2020-07-15T09:30:13.000Z | 2020-07-15T09:30:13.000Z | from ..models import RunwayPoint, Runway
| 31.152174 | 124 | 0.681089 | from ..models import RunwayPoint, Runway
def iterate_runways(reader):
for row in reader._reader:
runway_id, full_visible, *pts_data = row
full_visible = bool(int(full_visible))
assert len(pts_data) == 18 # 6 points, each point having 3 values
start_left, start_right = _from_row(pts_data[0:3]), _from_row(pts_data[3:6])
end_left, end_right = _from_row(pts_data[6:9]), _from_row(pts_data[9:12])
threshold_left, threshold_right = _from_row(pts_data[12:15]), _from_row(pts_data[15:18])
yield Runway(runway_id, full_visible, start_left, start_right, end_left, end_right, threshold_left, threshold_right)
def write_runway(runway, writer):
writer._writer.writerow((
runway.id,
int(runway.full_visible),
*_as_row(runway.start_left),
*_as_row(runway.start_right),
*_as_row(runway.end_left),
*_as_row(runway.end_right),
*_as_row(runway.threshold_left),
*_as_row(runway.threshold_right),
))
def _from_row(row):
visible, x, y = row
visible = bool(int(visible))
x = _deserialize(x)
y = _deserialize(y)
return RunwayPoint(visible, x, y)
def _as_row(point):
return int(point.visible), _serialize(point.x), _serialize(point.y)
def _serialize(coordinate):
return '' if coordinate is None else coordinate
def _deserialize(value):  # 'value' avoids shadowing the builtin input()
    return None if value == '' else int(value)
| 1,248 | 0 | 138 |
0069c9e2e22ac4791dcf0c3156a7d75e7be45e71 | 346 | py | Python | old files/problem0009.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | old files/problem0009.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | old files/problem0009.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | ###########################
# Project Euler Problem 9
# Special Pythagorean triplet
#
# Code by Kevin Marciniak
###########################
total = 1000
product = 0
for c in range(1, 1000):
for b in range(1, c):
for a in range(1, b):
            if (a + b + c) == total:
if ((a * a) + (b * b)) == (c * c):
product = a * b * c
print(product)
| 18.210526 | 38 | 0.471098 | ###########################
# Project Euler Problem 9
# Special Pythagorean triplet
#
# Code by Kevin Marciniak
###########################
total = 1000
product = 0
for c in range(1, 1000):
for b in range(1, c):
for a in range(1, b):
            if (a + b + c) == total:
if ((a * a) + (b * b)) == (c * c):
product = a * b * c
print(product)
| 0 | 0 | 0 |
a147e22d5aeaabe35ccc4c56ea5539f536e24407 | 3,685 | py | Python | lbrynet/wallet/ledger.py | ttkopec/lbry | 03415415ed397730e6f691f527f51b429a834ed5 | [
"MIT"
] | null | null | null | lbrynet/wallet/ledger.py | ttkopec/lbry | 03415415ed397730e6f691f527f51b429a834ed5 | [
"MIT"
] | 110 | 2018-11-26T05:41:35.000Z | 2021-08-03T15:37:20.000Z | lbrynet/wallet/ledger.py | ttkopec/lbry | 03415415ed397730e6f691f527f51b429a834ed5 | [
"MIT"
] | 1 | 2018-09-20T22:15:59.000Z | 2018-09-20T22:15:59.000Z | import logging
from six import int2byte
from binascii import unhexlify
from twisted.internet import defer
from .resolve import Resolver
from lbryschema.error import URIParseError
from lbryschema.uri import parse_lbry_uri
from torba.baseledger import BaseLedger
from .account import Account
from .network import Network
from .database import WalletDatabase
from .transaction import Transaction
from .header import Headers, UnvalidatedHeaders
log = logging.getLogger(__name__)
| 34.12037 | 101 | 0.735414 | import logging
from six import int2byte
from binascii import unhexlify
from twisted.internet import defer
from .resolve import Resolver
from lbryschema.error import URIParseError
from lbryschema.uri import parse_lbry_uri
from torba.baseledger import BaseLedger
from .account import Account
from .network import Network
from .database import WalletDatabase
from .transaction import Transaction
from .header import Headers, UnvalidatedHeaders
log = logging.getLogger(__name__)
class MainNetLedger(BaseLedger):
name = 'LBRY Credits'
symbol = 'LBC'
network_name = 'mainnet'
account_class = Account
database_class = WalletDatabase
headers_class = Headers
network_class = Network
transaction_class = Transaction
secret_prefix = int2byte(0x1c)
pubkey_address_prefix = int2byte(0x55)
script_address_prefix = int2byte(0x7a)
extended_public_key_prefix = unhexlify('0488b21e')
extended_private_key_prefix = unhexlify('0488ade4')
max_target = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
genesis_bits = 0x1f00ffff
target_timespan = 150
default_fee_per_byte = 50
default_fee_per_name_char = 200000
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fee_per_name_char = self.config.get('fee_per_name_char', self.default_fee_per_name_char)
@property
def resolver(self):
return Resolver(self.headers.claim_trie_root, self.headers.height, self.transaction_class,
hash160_to_address=self.hash160_to_address, network=self.network)
@defer.inlineCallbacks
def resolve(self, page, page_size, *uris):
for uri in uris:
try:
parse_lbry_uri(uri)
except URIParseError as err:
defer.returnValue({'error': err.message})
resolutions = yield self.network.get_values_for_uris(self.headers.hash().decode(), *uris)
return (yield self.resolver._handle_resolutions(resolutions, uris, page, page_size))
@defer.inlineCallbacks
def get_claim_by_claim_id(self, claim_id):
result = (yield self.network.get_claims_by_ids(claim_id)).pop(claim_id, {})
return (yield self.resolver.get_certificate_and_validate_result(result))
@defer.inlineCallbacks
def get_claim_by_outpoint(self, txid, nout):
claims = (yield self.network.get_claims_in_tx(txid)) or []
for claim in claims:
if claim['nout'] == nout:
return (yield self.resolver.get_certificate_and_validate_result(claim))
return 'claim not found'
@defer.inlineCallbacks
def start(self):
yield super().start()
yield defer.DeferredList([
a.maybe_migrate_certificates() for a in self.accounts
])
class TestNetLedger(MainNetLedger):
network_name = 'testnet'
pubkey_address_prefix = int2byte(111)
script_address_prefix = int2byte(196)
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
class RegTestLedger(MainNetLedger):
network_name = 'regtest'
headers_class = UnvalidatedHeaders
pubkey_address_prefix = int2byte(111)
script_address_prefix = int2byte(196)
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
max_target = 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
genesis_bits = 0x207fffff
target_timespan = 1
| 1,336 | 1,796 | 69 |
226c32b59ca6bfd5663903c81b43098c8f2f31df | 1,870 | py | Python | emontranslator_v0.py | mkaiserpm/emonpython | f5e7d70b83f1c528fc485556464ce1b4f8553d9b | [
"MIT"
] | null | null | null | emontranslator_v0.py | mkaiserpm/emonpython | f5e7d70b83f1c528fc485556464ce1b4f8553d9b | [
"MIT"
] | null | null | null | emontranslator_v0.py | mkaiserpm/emonpython | f5e7d70b83f1c528fc485556464ce1b4f8553d9b | [
"MIT"
] | null | null | null | '''
Created on 01.05.2017
@author: mario
Emontranslator
Receive messages from serial/uart
Generate JSON Emon Input Messages
Insert via EMON API / APIKEY to emoncms on localhost (running on pi)
'''
import serial
import httplib
import time
domain = "localhost"
emoncmspath = "emoncms"
apikey = "2eba96e51f6b41534f52110ad063b0c8"
nodeid = 10
conn = httplib.HTTPConnection(domain)
# Set this to the serial port of your emontx and baud rate, 9600 is standard emontx baud rate
ser = serial.Serial('/dev/ttyS0', 9600)
while 1:
try:
# Read in line of readings from serial / uart
linestr = ser.readline()
linestr = linestr.rstrip()
#print linestr
nodeid,temp,humid,voltage=parseLine(linestr)
if nodeid:
params = ("{temp:%.2f,humid:%.2f,voltage:%.2f}"%(temp,humid,voltage))
print params
print "nodeid:"+str(nodeid)
# Send to emoncms
conn.connect()
conn.request("GET", "/"+emoncmspath+"/input/post.json?&node="+str(nodeid)+"&json="+params+"&apikey="+apikey)
response = conn.getresponse()
print response.read()
except KeyboardInterrupt:
raise
except Exception as e:
print e.__doc__
print e.message
pass
time.sleep(1)
| 26.338028 | 120 | 0.578075 | '''
Created on 01.05.2017
@author: mario
Emontranslator
Receive messages from serial/uart
Generate JSON Emon Input Messages
Insert via EMON API / APIKEY to emoncms on localhost (running on pi)
'''
import serial
import httplib
import time
domain = "localhost"
emoncmspath = "emoncms"
apikey = "2eba96e51f6b41534f52110ad063b0c8"
nodeid = 10
conn = httplib.HTTPConnection(domain)
# Set this to the serial port of your emontx and baud rate, 9600 is standard emontx baud rate
ser = serial.Serial('/dev/ttyS0', 9600)
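# Expected serial line format (inferred from parseLine below):
# '<nodeid> <temp*100> <humid*100> <voltage*100>', space-separated integers.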
def parseLine(linestr):
nodeid = None
temp = 0
humid = 0
voltage = 0
if "BAD-CRC" not in linestr:
if len(linestr) > 2:
data = linestr.split(" ")
print linestr
print data
nodeid = int(data[0])
temp = float(data[1])
temp = temp/ 100.
humid = float(data[2])
humid = humid / 100.
voltage = float(data[3])
voltage = voltage / 100.
return nodeid,temp,humid,voltage
while 1:
try:
# Read in line of readings from serial / uart
linestr = ser.readline()
linestr = linestr.rstrip()
#print linestr
nodeid,temp,humid,voltage=parseLine(linestr)
if nodeid:
params = ("{temp:%.2f,humid:%.2f,voltage:%.2f}"%(temp,humid,voltage))
print params
print "nodeid:"+str(nodeid)
# Send to emoncms
conn.connect()
conn.request("GET", "/"+emoncmspath+"/input/post.json?&node="+str(nodeid)+"&json="+params+"&apikey="+apikey)
response = conn.getresponse()
print response.read()
except KeyboardInterrupt:
raise
except Exception as e:
print e.__doc__
print e.message
pass
time.sleep(1)
| 529 | 0 | 23 |
515fea6b09cfd40afa2a167b2e7a719933d9dd52 | 3,970 | py | Python | client.py | AvaCity/avacity-async | d600bf3914ab13c918d33a17b1c70df8d2af6913 | [
"BSD-3-Clause"
] | 10 | 2020-08-14T03:41:13.000Z | 2021-12-12T20:04:08.000Z | client.py | oopss1k/1 | 78fc1d2cdd001630d80a065a4243e1745f6ba876 | [
"BSD-3-Clause"
] | 6 | 2020-08-28T17:27:55.000Z | 2022-02-25T20:39:02.000Z | client.py | AvaCity/avacity-async | d600bf3914ab13c918d33a17b1c70df8d2af6913 | [
"BSD-3-Clause"
] | 5 | 2020-08-13T20:40:16.000Z | 2022-02-25T20:28:43.000Z | import logging
import asyncio
import binascii
import time
import struct
from ipaddress import ip_network, ip_address
import protocol
import const
PUFFIN_SUB = ["107.178.32.0/20", "45.33.128.0/20", "101.127.206.0/23",
"101.127.208.0/23"]
| 32.276423 | 79 | 0.517884 | import logging
import asyncio
import binascii
import time
import struct
from ipaddress import ip_network, ip_address
import protocol
import const
PUFFIN_SUB = ["107.178.32.0/20", "45.33.128.0/20", "101.127.206.0/23",
"101.127.208.0/23"]
def is_puffin(ip):
for net in PUFFIN_SUB:
net = ip_network(net)
if ip_address(ip) in net:
return True
return False
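# Puffin browser traffic arrives through these proxy subnets, so the source
# address is not the user's real IP and is not stored (see handle() below).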
class Client():
def __init__(self, server):
self.server = server
self.user_data = {}
self.uid = None
self.drop = False
self.debug = False
self.encrypted = False
self.compressed = False
self.checksummed = False
self.room = ""
self.position = (0, 0)
self.dimension = 4
self.state = 0
self.action_tag = ""
self.canyon_lid = None
self.last_msg = time.time()
async def handle(self, reader, writer):
self.reader = reader
self.writer = writer
self.addr = writer.get_extra_info('peername')[0]
if not is_puffin(self.addr):
self.user_data["ip_address"] = self.addr
buffer = b""
while True:
await asyncio.sleep(0.2)
try:
data = await reader.read(1024)
except OSError:
break
if not data:
break
data = protocol.BytesWithPosition(buffer+data)
buffer = b""
            # hex-encoded Flash "<policy-file-request/>" probe (NUL-terminated)
            if data.hex() == "3c706f6c6963792d66696c652d726571756573742f3e00":
writer.write(const.XML + b"\x00")
await writer.drain()
continue
while len(data) - data.pos > 4:
length = data.read_i32()
if len(data) - data.pos < length:
data.pos = 0
break
try:
final_data = protocol.processFrame(data.read(length), True)
except Exception:
print("Произошла ошибка у "+self.uid)
data.pos = len(data)
break
if final_data:
try:
await self.server.process_data(final_data, self)
except Exception as e:
logging.exception("Ошибка при обработке данных")
if len(data) - data.pos > 0:
buffer = data.read(len(data) - data.pos)
await self._close_connection()
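    # Framing sketch, derived from handle() above (not original commentary):
    # every inbound frame is a 4-byte big-endian length followed by that many
    # bytes; an incomplete tail is kept in `buffer` and re-prepended to the
    # next read, so frames may span multiple TCP reads.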
async def send(self, msg, type_=34):
if self.drop:
return
data = struct.pack(">b", type_)
data += protocol.encodeArray(msg)
data = self._make_header(data) + data
try:
self.writer.write(data)
await self.writer.drain()
except (BrokenPipeError, ConnectionResetError, AssertionError,
TimeoutError, OSError, AttributeError):
self.writer.close()
def _make_header(self, msg):
header_length = 1
mask = 0
if self.encrypted:
mask |= (1 << 1)
if self.compressed:
mask |= (1 << 2)
if self.checksummed:
mask |= (1 << 3)
header_length += 4
buf = struct.pack(">i", len(msg)+header_length)
buf += struct.pack(">B", mask)
if self.checksummed:
buf += struct.pack(">I", binascii.crc32(msg))
return buf
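    # Header layout sketch, as built by _make_header above (not original
    # commentary):
    #   bytes 0-3: big-endian int32 holding len(msg) + header_length
    #   byte  4  : flag mask (bit 1 = encrypted, bit 2 = compressed,
    #              bit 3 = checksummed)
    #   bytes 5-8: CRC32 of the payload, present only when checksummed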
async def _close_connection(self):
self.drop = True
self.writer.close()
if self.uid:
if self.uid in self.server.online:
del self.server.online[self.uid]
if self.room:
await self.server.modules["h"].leave_room(self)
if self.uid in self.server.inv:
self.server.inv[self.uid].expire = time.time()+30
await self.server.redis.set(f"uid:{self.uid}:lvt",
int(time.time()))
del self
| 3,582 | -6 | 180 |
96e5618a2b4dd65a44b72e0d8d469e32ed2b883d | 894 | py | Python | geotrek/trekking/filters.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | null | null | null | geotrek/trekking/filters.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | null | null | null | geotrek/trekking/filters.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | null | null | null | from django.utils.translation import ugettext_lazy as _
from mapentity.filters import MapEntityFilterSet
from geotrek.core.filters import TopologyFilter
from .models import Trek, POI, Service
| 27.090909 | 78 | 0.671141 | from django.utils.translation import ugettext_lazy as _
from mapentity.filters import MapEntityFilterSet
from geotrek.core.filters import TopologyFilter
from .models import Trek, POI, Service
class TrekFilterSet(MapEntityFilterSet):
class Meta:
model = Trek
fields = ['published', 'difficulty', 'duration', 'themes', 'networks',
'practice', 'accessibilities', 'route', 'labels',
'structure', 'source', 'portal', 'reservation_system']
class POITrekFilter(TopologyFilter):
queryset = Trek.objects.existing()
class POIFilterSet(MapEntityFilterSet):
trek = POITrekFilter(label=_("Trek"), required=False)
class Meta:
model = POI
fields = ['published', 'type', 'trek', 'structure']
class ServiceFilterSet(MapEntityFilterSet):
class Meta:
model = Service
fields = ['type', 'structure']
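# Illustrative note (not original commentary): POIFilterSet combines the plain
# model fields listed in Meta with a topology-based 'trek' filter
# (POITrekFilter), restricting POIs to those on a given trek.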
| 0 | 605 | 92 |
6f9cf2d8cd0d99cb21323c1e981144539e4f1b93 | 255,228 | py | Python | gbpservice/neutron/tests/unit/services/grouppolicy/test_apic_mapping.py | ashutosh-mishra/my-test | 51c82af293f291b9182204392e7d21bda27786d1 | [
"Apache-2.0"
] | null | null | null | gbpservice/neutron/tests/unit/services/grouppolicy/test_apic_mapping.py | ashutosh-mishra/my-test | 51c82af293f291b9182204392e7d21bda27786d1 | [
"Apache-2.0"
] | null | null | null | gbpservice/neutron/tests/unit/services/grouppolicy/test_apic_mapping.py | ashutosh-mishra/my-test | 51c82af293f291b9182204392e7d21bda27786d1 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
import sys
import mock
import netaddr
import webob.exc
from apic_ml2.neutron.db import port_ha_ipaddress_binding as ha_ip_db
from apic_ml2.neutron.tests.unit.ml2.drivers.cisco.apic import (
test_cisco_apic_common as mocked)
from apicapi import apic_mapper
from neutron.agent import securitygroups_rpc as sg_cfg
from neutron.common import rpc as n_rpc
from neutron import context
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2 as n_db
from neutron.db import model_base
from neutron.extensions import portbindings
from neutron import manager
from opflexagent import constants as ocst
from oslo_config import cfg
from oslo_serialization import jsonutils
from gbpservice.neutron.plugins.ml2.drivers.grouppolicy.apic import driver
from gbpservice.neutron.services.grouppolicy import (
group_policy_context as p_context)
from gbpservice.neutron.services.grouppolicy import config
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
apic_mapping as amap)
from gbpservice.neutron.services.l3_router import l3_apic
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_rmd)
APIC_L2_POLICY = 'l2_policy'
APIC_L3_POLICY = 'l3_policy'
APIC_POLICY_RULE_SET = 'policy_rule_set'
APIC_POLICY_TARGET_GROUP = 'policy_target_group'
APIC_POLICY_RULE = 'policy_rule'
APIC_EXTERNAL_RID = '1.0.0.1'
APIC_EXTERNAL_EPG = 'ext-epg'
APIC_PRE_L3OUT_TENANT = 'common'
APIC_PRE_VRF_TENANT = APIC_PRE_L3OUT_TENANT
APIC_PRE_VRF = 'pre-vrf'
AGENT_TYPE = ocst.AGENT_TYPE_OPFLEX_OVS
AGENT_CONF = {'alive': True, 'binary': 'somebinary',
'topic': 'sometopic', 'agent_type': AGENT_TYPE,
'configurations': {'opflex_networks': None,
'bridge_mappings': {'physnet1': 'br-eth1'}}}
AGENT_TYPE_DVS = driver.AGENT_TYPE_DVS
AGENT_CONF_DVS = {'alive': True, 'binary': 'somebinary',
'topic': 'sometopic', 'agent_type': AGENT_TYPE_DVS,
'configurations': {'opflex_networks': None}}
BOOKED_PORT_VALUE = 'myBookedPort'
class FakeNetworkContext(object):
"""To generate network context for testing purposes only."""
class FakePortContext(object):
"""To generate port context for testing purposes only."""
# Although the naming convention used here has been chosen poorly,
# I'm separating the tests in order to get the mock re-set.
# TODO(ivar): verify rule intersection with hierarchical PRS happens
# on APIC
| 48.002257 | 79 | 0.584552 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
import sys
import mock
import netaddr
import webob.exc
from apic_ml2.neutron.db import port_ha_ipaddress_binding as ha_ip_db
from apic_ml2.neutron.tests.unit.ml2.drivers.cisco.apic import (
test_cisco_apic_common as mocked)
from apicapi import apic_mapper
from neutron.agent import securitygroups_rpc as sg_cfg
from neutron.common import rpc as n_rpc
from neutron import context
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2 as n_db
from neutron.db import model_base
from neutron.extensions import portbindings
from neutron import manager
from opflexagent import constants as ocst
from oslo_config import cfg
from oslo_serialization import jsonutils
from gbpservice.neutron.plugins.ml2.drivers.grouppolicy.apic import driver
from gbpservice.neutron.services.grouppolicy import (
group_policy_context as p_context)
from gbpservice.neutron.services.grouppolicy import config
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
apic_mapping as amap)
from gbpservice.neutron.services.l3_router import l3_apic
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_rmd)
APIC_L2_POLICY = 'l2_policy'
APIC_L3_POLICY = 'l3_policy'
APIC_POLICY_RULE_SET = 'policy_rule_set'
APIC_POLICY_TARGET_GROUP = 'policy_target_group'
APIC_POLICY_RULE = 'policy_rule'
APIC_EXTERNAL_RID = '1.0.0.1'
APIC_EXTERNAL_EPG = 'ext-epg'
APIC_PRE_L3OUT_TENANT = 'common'
APIC_PRE_VRF_TENANT = APIC_PRE_L3OUT_TENANT
APIC_PRE_VRF = 'pre-vrf'
AGENT_TYPE = ocst.AGENT_TYPE_OPFLEX_OVS
AGENT_CONF = {'alive': True, 'binary': 'somebinary',
'topic': 'sometopic', 'agent_type': AGENT_TYPE,
'configurations': {'opflex_networks': None,
'bridge_mappings': {'physnet1': 'br-eth1'}}}
AGENT_TYPE_DVS = driver.AGENT_TYPE_DVS
AGENT_CONF_DVS = {'alive': True, 'binary': 'somebinary',
'topic': 'sometopic', 'agent_type': AGENT_TYPE_DVS,
'configurations': {'opflex_networks': None}}
BOOKED_PORT_VALUE = 'myBookedPort'
def echo(context, string, prefix=''):
return prefix + string
class MockCallRecorder(mock.Mock):
recorded_call_set = set()
def __call__(self, *args, **kwargs):
self.recorded_call_set.add(self.generate_entry(*args, **kwargs))
return mock.Mock()
def call_happened_with(self, *args, **kwargs):
return self.generate_entry(*args, **kwargs) in self.recorded_call_set
def generate_entry(self, *args, **kwargs):
return args, tuple((x, kwargs[x]) for x in sorted(kwargs.keys()))
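# Illustrative use (not from the original tests): after
#   rec = MockCallRecorder(); rec('epg', tenant='t1')
# rec.call_happened_with('epg', tenant='t1') is True, because generate_entry
# normalizes args/kwargs into a hashable tuple with kwargs sorted by key.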
class ApicMappingTestCase(
test_rmd.ResourceMappingTestCase,
mocked.ControllerMixin, mocked.ConfigMixin):
def setUp(self, sc_plugin=None, nat_enabled=True,
pre_existing_l3out=False, default_agent_conf=True,
ml2_options=None):
self.saved_apicapi = sys.modules["apicapi"]
sys.modules["apicapi"] = mock.Mock()
if default_agent_conf:
self.agent_conf = AGENT_CONF
cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP')
config.cfg.CONF.set_override('enable_security_group', False,
group='SECURITYGROUP')
n_rpc.create_connection = mock.Mock()
amap.ApicMappingDriver.get_apic_manager = mock.Mock(
return_value=mock.MagicMock(
name_mapper=mock.Mock(),
ext_net_dict={},
per_tenant_nat_epg=False))
self.set_up_mocks()
ml2_opts = ml2_options or {
'mechanism_drivers': ['apic_gbp'],
'type_drivers': ['opflex'],
'tenant_network_types': ['opflex']
}
mock.patch('gbpservice.neutron.services.grouppolicy.drivers.cisco.'
'apic.apic_mapping.ApicMappingDriver.'
'_setup_rpc_listeners').start()
nova_client = mock.patch(
'gbpservice.neutron.services.grouppolicy.drivers.cisco.'
'apic.nova_client.NovaClient.get_server').start()
vm = mock.Mock()
vm.name = 'someid'
nova_client.return_value = vm
super(ApicMappingTestCase, self).setUp(
policy_drivers=['implicit_policy', 'apic', 'chain_mapping'],
ml2_options=ml2_opts, sc_plugin=sc_plugin)
engine = db_api.get_engine()
model_base.BASEV2.metadata.create_all(engine)
plugin = manager.NeutronManager.get_plugin()
plugin.remove_networks_from_down_agents = mock.Mock()
plugin.is_agent_down = mock.Mock(return_value=False)
self.driver = manager.NeutronManager.get_service_plugins()[
'GROUP_POLICY'].policy_driver_manager.policy_drivers['apic'].obj
self.l3plugin = l3_apic.ApicGBPL3ServicePlugin()
amap.ApicMappingDriver.get_base_synchronizer = mock.Mock()
self.driver.name_mapper.name_mapper = mock.Mock()
self.driver.name_mapper.name_mapper.tenant = echo
self.driver.name_mapper.name_mapper.l2_policy = echo
self.driver.name_mapper.name_mapper.l3_policy = echo
self.driver.name_mapper.name_mapper.policy_rule_set = echo
self.driver.name_mapper.name_mapper.policy_rule = echo
self.driver.name_mapper.name_mapper.app_profile.return_value = (
mocked.APIC_AP)
self.driver.name_mapper.name_mapper.policy_target_group = echo
self.driver.name_mapper.name_mapper.external_policy = echo
self.driver.name_mapper.name_mapper.external_segment = echo
self.driver.name_mapper.name_mapper.pre_existing = echo
self.driver.apic_manager.apic.transaction = self.fake_transaction
self.driver.notifier = mock.Mock()
self.driver.apic_manager.ext_net_dict = {}
amap.apic_manager.TENANT_COMMON = 'common'
amap.apic_manager.CP_ENTRY = 'os-entry'
self.common_tenant = amap.apic_manager.TENANT_COMMON
self.nat_enabled = nat_enabled
self.driver.l3out_vlan_alloc = mock.Mock()
self.pre_l3out = pre_existing_l3out
self.non_apic_network = False
def echo2(string):
return string
if self.pre_l3out:
self.orig_query_l3out_info = self.driver._query_l3out_info
self.driver._query_l3out_info = mock.Mock()
self.driver._query_l3out_info.return_value = {
'l3out_tenant': apic_mapper.ApicName(APIC_PRE_L3OUT_TENANT),
'vrf_name': APIC_PRE_VRF,
'vrf_tenant': APIC_PRE_VRF_TENANT,
                # fake l3out response from APIC for testing purposes only
'l3out': ([{u'l3extExtEncapAllocator': {}},
{u'l3extInstP': {}},
{u'l3extRtBDToOut': {}},
{u'l3extRsOutToBDPublicSubnetHolder': {}},
{u'l3extRsNdIfPol': {u'tDn': u'',
u'tnNdIfPolName': u''}},
{u'l3extRsDampeningPol':
{u'tDn': u'', u'tnRtctrlProfileName': u''}},
{u'ospfRsIfPol': {u'tDn': u'',
u'tnOspfIfPolName': u''}},
{u'l3extRsEngressQosDppPol':
{u'tDn': u'', u'tnQosDppPolName': u''}},
{u'bfdRsIfPol': {u'tDn': u'',
u'tnBfdIfPolName': u''}},
{u'bgpRsPeerPfxPol': {u'tDn': u'',
u'tnBgpPeerPfxPolName': u''}},
{u'eigrpRsIfPol': {u'tDn': u'',
u'tnEigrpIfPolName': u''}},
{u'l3extLNodeP': {u'attributes':
{u'dn': u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP',
u'lcOwn': u'local', u'name': u'Leaf3-4_NP',
u'targetDscp': u'unspecified', u'configIssues': u'',
u'stateQual': u'', u'tCl': u'', u'tContextDn': u'',
u'tRn': u'', u'type': u'', u'rType': u'', u'state': u'',
u'forceResolve': u'', u'tag': u'yellow-green',
u'monPolDn': u'', u'modTs': u'', u'uid': u'15374',
u'encap': u'unknown', u'addr': u'0.0.0.0'},
u'children': [{u'l3extLIfP':
{u'children': [{u'l3extRsPathL3OutAtt':
{u'attributes':
{u'encap': u'vlan-3101',
u'ifInstT': u'sub-interface'
}}}]}}
]}},
{u'l3extRsEctx':
{u'attributes':
{u'dn': u'uni/tn-common/out-supported/rsectx',
u'tDn': u'', u'tnFvCtxName': u'default'}}}])}
self.trimmed_l3out = [{}, {}, {}, {},
{u'l3extRsNdIfPol':
{u'tnNdIfPolName': u''}},
{u'l3extRsDampeningPol':
{u'tnRtctrlProfileName': u''}},
{u'ospfRsIfPol': {u'tnOspfIfPolName': u''}},
{u'l3extRsEngressQosDppPol':
{u'tnQosDppPolName': u''}},
{u'bfdRsIfPol': {u'tnBfdIfPolName': u''}},
{u'bgpRsPeerPfxPol': {u'tnBgpPeerPfxPolName': u''}},
{u'eigrpRsIfPol': {u'tnEigrpIfPolName': u''}},
{u'l3extLNodeP':
{u'attributes':
{u'dn': u'uni/tn-test-tenant/out-Shd-Sub/lnodep-Leaf3-4_NP'},
u'children': [{u'l3extLIfP':
{u'children': [{u'l3extRsPathL3OutAtt':
{u'attributes':
{u'ifInstT':
u'sub-interface',
u'encap': 'vlan-999'
}}}]}}]}},
{u'l3extRsEctx':
{u'attributes':
{u'dn': u'uni/tn-test-tenant/out-Shd-Sub/rsectx',
u'tnFvCtxName': u'myl3p'}}}]
self.driver.apic_manager.apic.fvTenant.rn = echo2
self.driver.apic_manager.apic.l3extOut.rn = echo2
self.driver.l3out_vlan_alloc.reserve_vlan.return_value = 999
self.driver.apic_manager.apic.fvTenant.name = echo2
self.driver.apic_manager.apic.fvCtx.name = echo2
self._db_plugin = n_db.NeutronDbPluginV2()
def tearDown(self):
sys.modules["apicapi"] = self.saved_apicapi
super(ApicMappingTestCase, self).tearDown()
def _build_external_dict(self, name, cidr_exposed, is_edge_nat=False):
ext_info = {
'enable_nat': 'True' if self.nat_enabled else 'False'
}
if self.pre_l3out:
ext_info['preexisting'] = 'True'
ext_info['external_epg'] = APIC_EXTERNAL_EPG
else:
ext_info.update({
'switch': mocked.APIC_EXT_SWITCH,
'port': mocked.APIC_EXT_MODULE + '/' + mocked.APIC_EXT_PORT,
'encap': mocked.APIC_EXT_ENCAP,
'router_id': APIC_EXTERNAL_RID,
'gateway_ip': str(netaddr.IPNetwork(cidr_exposed)[1]),
'cidr_exposed': cidr_exposed})
if is_edge_nat:
ext_info['edge_nat'] = 'true'
ext_info['vlan_range'] = '2000:2010'
return {name: ext_info}
def _mock_external_dict(self, data, is_edge_nat=False):
self.driver.apic_manager.ext_net_dict = {}
for x in data:
self.driver.apic_manager.ext_net_dict.update(
self._build_external_dict(x[0], x[1], is_edge_nat=is_edge_nat))
def _create_simple_policy_rule(self, direction='bi', protocol='tcp',
port_range=80, shared=False,
action_type='allow', action_value=None):
cls = self.create_policy_classifier(
direction=direction, protocol=protocol,
port_range=port_range, shared=shared)['policy_classifier']
action = self.create_policy_action(
action_type=action_type, shared=shared,
action_value=action_value)['policy_action']
return self.create_policy_rule(
policy_classifier_id=cls['id'], policy_actions=[action['id']],
shared=shared)['policy_rule']
def _bind_port_to_host(self, port_id, host):
data = {'port': {'binding:host_id': host,
'device_owner': 'compute:',
'device_id': 'someid'}}
return super(ApicMappingTestCase, self)._bind_port_to_host(
port_id, host, data=data)
def _bind_dhcp_port_to_host(self, port_id, host):
data = {'port': {'binding:host_id': host,
'device_owner': 'network:dhcp',
'device_id': 'someid'}}
return super(ApicMappingTestCase, self)._bind_port_to_host(
port_id, host, data=data)
class ApicMappingVlanTestCase(ApicMappingTestCase):
def setUp(self, **kwargs):
config.cfg.CONF.set_override(
'network_vlan_ranges', ['physnet1:100:200'], group='ml2_type_vlan')
kwargs['ml2_options'] = {
'mechanism_drivers': ['apic_gbp', 'openvswitch'],
'type_drivers': ['vlan'],
'tenant_network_types': ['vlan']
}
kwargs['default_agent_conf'] = False
super(ApicMappingVlanTestCase, self).setUp(**kwargs)
self.non_apic_network = True
def _get_ptg_shadow_net(self, ptg):
net = self._list_resource('networks', self.api,
tenant_id=ptg['tenant_id'],
name=self.driver._get_ptg_shadow_network_name(ptg))
net = net['networks']
if net:
return net[0]
def _get_ptg_shadow_subnet(self, ptg):
shadow_net = self._get_ptg_shadow_net(ptg)
if shadow_net:
return shadow_net['subnets'][0]
class TestPolicyTarget(ApicMappingTestCase):
def test_policy_target_port_deleted_on_apic(self):
ptg = self.create_policy_target_group()['policy_target_group']
subnet = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg) if self.non_apic_network
else ptg['subnets'][0],
self.api)
with self.port(subnet=subnet) as port:
self._bind_port_to_host(port['port']['id'], 'h1')
pt = self.create_policy_target(
policy_target_group_id=ptg['id'], port_id=port['port']['id'])
self.delete_policy_target(pt['policy_target']['id'])
self.assertTrue(self.driver.notifier.port_update.called)
def test_policy_target_delete_no_port(self):
ptg = self.create_policy_target_group()['policy_target_group']
subnet = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg) if self.non_apic_network
else ptg['subnets'][0],
self.api)
with self.port(subnet=subnet) as port:
self._bind_port_to_host(port['port']['id'], 'h1')
pt = self.create_policy_target(
policy_target_group_id=ptg['id'], port_id=port['port']['id'])
res = self.new_delete_request('ports', port['port']['id'],
self.fmt).get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
self.delete_policy_target(pt['policy_target']['id'],
expected_res_status=404)
def test_delete_policy_target_notification_no_apic_network(self):
ptg = self.create_policy_target_group(
name="ptg1")['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
# Implicit port will be deleted with the PT
self.delete_policy_target(pt1['id'], expected_res_status=204)
# No notification needed
self.assertFalse(self.driver.notifier.port_update.called)
self.driver.notifier.port_update.reset_mock()
subnet = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg) if self.non_apic_network
else ptg['subnets'][0],
self.api)
with self.port(subnet=subnet) as port:
# Create EP with bound port
port = self._bind_port_to_host(port['port']['id'], 'h1')
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'], port_id=port['port']['id'])
# Explicit port won't be deleted with PT
self.delete_policy_target(pt1['policy_target']['id'],
expected_res_status=204)
# Issue notification for the agent
self.assertTrue(self.driver.notifier.port_update.called)
def test_get_vrf_details(self):
l3p = self.create_l3_policy(name='myl3')['l3_policy']
details = self.driver.get_vrf_details(
context.get_admin_context(),
vrf_id=l3p['id'], host='h1')
self.assertEqual(l3p['id'], details['l3_policy_id'])
pool = set([l3p['ip_pool']])
if 'proxy_ip_pool' in l3p:
pool.add(l3p['proxy_ip_pool'])
self.assertEqual(pool, set(details['vrf_subnets']))
self.assertEqual(l3p['tenant_id'], details['vrf_tenant'])
self.assertEqual(l3p['id'], details['vrf_name'])
def _do_test_get_gbp_details(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
self.driver.apic_manager.ext_net_dict[
'supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
cidr='192.168.0.2/24',
expected_res_status=201, shared=True)['external_segment']
self.create_nat_pool(external_segment_id=es['id'],
ip_pool='20.20.20.0/24')
l3p = self.create_l3_policy(name='myl3',
external_segments={es['id']: ['']})['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
nsp = self.create_network_service_policy(
network_service_params=[
{"type": "ip_pool", "value": "nat_pool", "name": "test"}])[
'network_service_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'],
network_service_policy_id=nsp['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
mapping = self.driver.get_gbp_details(context.get_admin_context(),
device='tap%s' % pt1['port_id'], host='h1')
req_mapping = self.driver.request_endpoint_details(
context.get_admin_context(),
request={'device': 'tap%s' % pt1['port_id'], 'host': 'h1',
'timestamp': 0, 'request_id': 'request_id'})
self.assertEqual(mapping, req_mapping['gbp_details'])
self.assertEqual(pt1['port_id'], mapping['port_id'])
self.assertEqual(ptg['id'], mapping['endpoint_group_name'])
self.assertEqual('someid', mapping['vm-name'])
self.assertTrue(mapping['enable_dhcp_optimization'])
self.assertEqual(1, len(mapping['subnets']))
subnet = self._get_object('subnets', ptg['subnets'][0], self.api)
self.assertEqual(subnet['subnet']['cidr'],
mapping['subnets'][0]['cidr'])
self.assertEqual(1, len(mapping['floating_ip']))
fip = mapping['floating_ip'][0]
self.assertEqual(pt1['port_id'], fip['port_id'])
self.assertEqual("NAT-epg-%s" % es['id'], fip['nat_epg_name'])
self.assertEqual(
(es['tenant_id'] if self.driver.per_tenant_nat_epg
else self.common_tenant),
fip['nat_epg_tenant'])
self.assertEqual(l3p['tenant_id'], mapping['vrf_tenant'])
self.assertEqual(l3p['id'], mapping['vrf_name'])
if 'proxy_ip_pool' in l3p:
self.assertEqual([l3p['ip_pool'], l3p['proxy_ip_pool']],
mapping['vrf_subnets'])
else:
self.assertEqual([l3p['ip_pool']], mapping['vrf_subnets'])
self.assertEqual(1, len(mapping['host_snat_ips']))
self.assertEqual(es['name'],
mapping['host_snat_ips'][0]['external_segment_name'])
self.assertEqual("192.168.200.1",
mapping['host_snat_ips'][0]['gateway_ip'])
self.assertEqual("192.168.200.2",
mapping['host_snat_ips'][0]['host_snat_ip'])
self.assertEqual(24, mapping['host_snat_ips'][0]['prefixlen'])
# Verify Neutron details
self.assertEqual(pt1['port_id'],
req_mapping['neutron_details']['port_id'])
# Create event on a second host to verify that the SNAT
# port gets created for this second host
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt2['port_id'], 'h1')
mapping = self.driver.get_gbp_details(context.get_admin_context(),
device='tap%s' % pt2['port_id'], host='h2')
self.assertEqual(pt2['port_id'], mapping['port_id'])
self.assertEqual(1, len(mapping['host_snat_ips']))
self.assertEqual(es['name'],
mapping['host_snat_ips'][0]['external_segment_name'])
self.assertEqual("192.168.200.1",
mapping['host_snat_ips'][0]['gateway_ip'])
self.assertEqual("192.168.200.3",
mapping['host_snat_ips'][0]['host_snat_ip'])
self.assertEqual(24, mapping['host_snat_ips'][0]['prefixlen'])
def test_get_gbp_details(self):
self._do_test_get_gbp_details()
def test_get_gbp_details_ptne(self):
self.driver.per_tenant_nat_epg = True
self._do_test_get_gbp_details()
def test_get_snat_ip_for_vrf(self):
TEST_VRF1 = 'testvrf1'
TEST_VRF2 = 'testvrf2'
self._mock_external_dict([('supported', '192.168.0.2/24')])
self.driver.apic_manager.ext_net_dict[
'supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
cidr='192.168.0.2/24',
expected_res_status=201, shared=False)['external_segment']
self.create_nat_pool(external_segment_id=es['id'],
ip_pool='20.20.20.0/24')
l3p = self.create_l3_policy(name='myl3',
external_segments={es['id']: ['']})['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
nsp = self.create_network_service_policy(
network_service_params=[
{"type": "ip_pool", "value": "nat_pool", "name": "test"}])[
'network_service_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'],
network_service_policy_id=nsp['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
subnet = self._db_plugin.get_subnet(context.get_admin_context(),
es['subnet_id'])
network = self._db_plugin.get_network(context.get_admin_context(),
subnet['network_id'])
details = self.driver.get_snat_ip_for_vrf(context.get_admin_context(),
TEST_VRF1, network, es_name=es['name'])
self.assertEqual(es['name'],
details['external_segment_name'])
self.assertEqual("192.168.200.1",
details['gateway_ip'])
self.assertEqual("192.168.200.2",
details['host_snat_ip'])
self.assertEqual(24, details['prefixlen'])
# Verify that the same VRF returns the same SNAT IP
details2 = self.driver.get_snat_ip_for_vrf(context.get_admin_context(),
TEST_VRF1, network, es_name=es['name'])
self.assertEqual(details, details2)
# Create event on a second VRF to verify that the SNAT
# port gets created for this second VRF
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt2['port_id'], 'h1')
details = self.driver.get_snat_ip_for_vrf(context.get_admin_context(),
            TEST_VRF2, network, es_name=es['name'])
self.assertEqual(es['name'],
details['external_segment_name'])
self.assertEqual("192.168.200.1",
details['gateway_ip'])
self.assertEqual("192.168.200.3",
details['host_snat_ip'])
self.assertEqual(24, details['prefixlen'])
def test_snat_pool_subnet_deletion(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
self.driver.apic_manager.ext_net_dict[
'supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
cidr='192.168.0.2/24',
expected_res_status=201, shared=False)['external_segment']
admin_ctx = context.get_admin_context()
ext_net_id = self._db_plugin.get_subnet(
admin_ctx, es['subnet_id'])['network_id']
l3p = self.create_l3_policy(name='myl3',
external_segments={es['id']: ['']})['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
mapping = self.driver.get_gbp_details(context.get_admin_context(),
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(pt1['port_id'], mapping['port_id'])
self.assertEqual(1, len(mapping['host_snat_ips']))
self.assertEqual(es['name'],
mapping['host_snat_ips'][0]['external_segment_name'])
self.assertEqual("192.168.200.1",
mapping['host_snat_ips'][0]['gateway_ip'])
self.assertEqual("192.168.200.2",
mapping['host_snat_ips'][0]['host_snat_ip'])
self.assertEqual(24, mapping['host_snat_ips'][0]['prefixlen'])
self.update_l3_policy(l3p['id'], external_segments={},
expected_res_status=200)
subnet_filter = {'name': [amap.HOST_SNAT_POOL],
'network_id': [ext_net_id]}
internal_subnets = self._db_plugin.get_subnets(
admin_ctx, filters=subnet_filter)
self.assertEqual(1, len(internal_subnets))
self.delete_external_segment(es['id'],
expected_res_status=webob.exc.HTTPNoContent.code)
internal_subnets = self._db_plugin.get_subnets(
admin_ctx, filters=subnet_filter)
self.assertEqual(0, len(internal_subnets))
def test_snat_port_ip_loss(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
self.driver.apic_manager.ext_net_dict[
'supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
cidr='192.168.0.2/24', shared=False)['external_segment']
admin_ctx = context.get_admin_context()
ext_net_id = self._db_plugin.get_subnet(
admin_ctx, es['subnet_id'])['network_id']
l3p = self.create_l3_policy(name='myl3',
external_segments={es['id']: ['']})['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
mapping = self.driver.get_gbp_details(admin_ctx,
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(1, len(mapping['host_snat_ips']))
snat_ports = self._db_plugin.get_ports(admin_ctx,
filters={'name': [amap.HOST_SNAT_POOL_PORT],
'network_id': [ext_net_id],
'device_id': ['h1']})
self._db_plugin.update_port(admin_ctx,
snat_ports[0]['id'], {'port': {'fixed_ips': []}})
mapping = self.driver.get_gbp_details(admin_ctx,
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(0, len(mapping['host_snat_ips']))
def test_ip_address_owner_update(self):
l3p = self.create_l3_policy(name='myl3')['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
net_id = (self._get_ptg_shadow_net(ptg)['id']
if self.non_apic_network else l2p['network_id'])
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
self._bind_port_to_host(pt2['port_id'], 'h2')
ip_owner_info = {'port': pt1['port_id'], 'ip_address_v4': '1.2.3.4'}
self.driver._notify_port_update = mock.Mock()
# set new owner
self.driver.ip_address_owner_update(context.get_admin_context(),
ip_owner_info=ip_owner_info, host='h1')
obj = self.driver.ha_ip_handler.get_port_for_ha_ipaddress(
'1.2.3.4', net_id)
self.assertEqual(pt1['port_id'], obj['port_id'])
self.driver._notify_port_update.assert_called_with(mock.ANY,
pt1['port_id'])
# update existing owner
self.driver._notify_port_update.reset_mock()
ip_owner_info['port'] = pt2['port_id']
self.driver.ip_address_owner_update(context.get_admin_context(),
ip_owner_info=ip_owner_info, host='h2')
obj = self.driver.ha_ip_handler.get_port_for_ha_ipaddress(
'1.2.3.4', net_id)
self.assertEqual(pt2['port_id'], obj['port_id'])
exp_calls = [
mock.call(mock.ANY, pt1['port_id']),
mock.call(mock.ANY, pt2['port_id'])]
self._check_call_list(exp_calls,
self.driver._notify_port_update.call_args_list)
def test_enhanced_subnet_options(self):
self.driver.enable_metadata_opt = False
l3p = self.create_l3_policy(name='myl3',
ip_pool='192.168.0.0/16')['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
sub = self._get_object('subnets', ptg['subnets'][0],
self.api)
with self.port(subnet=sub, device_owner='network:dhcp',
tenant_id='onetenant') as dhcp:
if self.non_apic_network:
shadow_sub = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg), self.api)
with self.port(subnet=shadow_sub, tenant_id='onetenant',
device_owner='network:dhcp'):
pass
dhcp = dhcp['port']
details = self.driver.get_gbp_details(
context.get_admin_context(),
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(1, len(details['subnets']))
# Verify that DNS nameservers are correctly set
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dns_nameservers'])
# Verify Default route via GW
self.assertTrue({'destination': '0.0.0.0/0',
'nexthop': '192.168.0.1'} in
details['subnets'][0]['host_routes'])
# Verify Metadata route via DHCP
self.assertTrue(
{'destination': '169.254.169.254/16',
'nexthop': dhcp['fixed_ips'][0]['ip_address']} in
details['subnets'][0]['host_routes'])
# Verify no extra routes are leaking inside
self.assertEqual(2, len(details['subnets'][0]['host_routes']))
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dhcp_server_ips'])
def test_update_l2p_inject_default_route_false(self):
self.driver.enable_metadata_opt = False
l3p = self.create_l3_policy(name='myl3',
ip_pool='192.168.0.0/16')['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
sub = self._get_object('subnets', ptg['subnets'][0],
self.api)
# Add one more host_route to the subnet
more_host_routes = [{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'}]
data = {'subnet': {'host_routes': more_host_routes}}
req = self.new_update_request('subnets', data, sub['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(sorted(res['subnet']['host_routes']),
sorted(more_host_routes))
with self.port(subnet=sub, device_owner='network:dhcp',
tenant_id='onetenant') as dhcp:
if self.non_apic_network:
shadow_sub = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg), self.api)
with self.port(subnet=shadow_sub, tenant_id='onetenant',
device_owner='network:dhcp'):
pass
dhcp = dhcp['port']
details = self.driver.get_gbp_details(
context.get_admin_context(),
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(1, len(details['subnets']))
# Verify that DNS nameservers are correctly set
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dns_nameservers'])
# Verify Default route via GW
self.assertTrue({'destination': '0.0.0.0/0',
'nexthop': '192.168.0.1'} in
details['subnets'][0]['host_routes'])
# Verify Metadata route via DHCP
self.assertTrue(
{'destination': '169.254.169.254/16',
'nexthop': dhcp['fixed_ips'][0]['ip_address']} in
details['subnets'][0]['host_routes'])
# Verify additional host_routes are also added:
# GW + Metadata + 1 additional route = 3
self.assertEqual(3, len(details['subnets'][0]['host_routes']))
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dhcp_server_ips'])
# Verify gateway_ip is set
self.assertTrue('gateway_ip' in details['subnets'][0])
data = {'l2_policy': {'inject_default_route': False}}
res = self.new_update_request('l2_policies', data, l2p['id'],
self.fmt).get_response(self.ext_api)
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt2['port_id'], 'h1')
with self.port(subnet=sub, tenant_id='onetenant'):
details = self.driver.get_gbp_details(
context.get_admin_context(),
device='tap%s' % pt2['port_id'], host='h1')
self.assertEqual(1, len(details['subnets']))
# Verify Default route via GW is not present
self.assertFalse({'destination': '0.0.0.0/0',
'nexthop': '192.168.0.1'} in
details['subnets'][0]['host_routes'])
# Verify Metadata route via DHCP is not present
self.assertFalse(
{'destination': '169.254.169.254/16',
'nexthop': dhcp['fixed_ips'][0]['ip_address']} in
details['subnets'][0]['host_routes'])
# Verify only extra route is present
self.assertEqual(1, len(details['subnets'][0]['host_routes']))
self.assertTrue(
{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'} in
details['subnets'][0]['host_routes'])
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dhcp_server_ips'])
# Verify gateway_ip is not set
self.assertFalse('gateway_ip' in details['subnets'][0])
def test_create_l2p_inject_default_route_false(self):
self.driver.enable_metadata_opt = False
l3p = self.create_l3_policy(name='myl3',
ip_pool='192.168.0.0/16')['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'],
inject_default_route=False)['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
sub = self._get_object('subnets', ptg['subnets'][0],
self.api)
# Add one more host_route to the subnet
more_host_routes = [{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'}]
data = {'subnet': {'host_routes': more_host_routes}}
req = self.new_update_request('subnets', data, sub['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(sorted(res['subnet']['host_routes']),
sorted(more_host_routes))
with self.port(subnet=sub, device_owner='network:dhcp',
tenant_id='onetenant') as dhcp:
if self.non_apic_network:
shadow_sub = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg), self.api)
with self.port(subnet=shadow_sub, tenant_id='onetenant',
device_owner='network:dhcp'):
pass
dhcp = dhcp['port']
details = self.driver.get_gbp_details(
context.get_admin_context(),
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(1, len(details['subnets']))
# Verify that DNS nameservers are correctly set
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dns_nameservers'])
# Verify Default route via GW is not present
self.assertFalse({'destination': '0.0.0.0/0',
'nexthop': '192.168.0.1'} in
details['subnets'][0]['host_routes'])
# Verify Metadata route via DHCP is not present
self.assertFalse(
{'destination': '169.254.169.254/16',
'nexthop': dhcp['fixed_ips'][0]['ip_address']} in
details['subnets'][0]['host_routes'])
# Verify only extra route is present
self.assertEqual(1, len(details['subnets'][0]['host_routes']))
self.assertTrue(
{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'} in
details['subnets'][0]['host_routes'])
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dhcp_server_ips'])
# Verify gateway_ip is not set
self.assertFalse('gateway_ip' in details['subnets'][0])
def test_get_gbp_details_error(self):
details = self.driver.get_gbp_details(
context.get_admin_context(), device='tap%s' % 'randomid',
host='h1')
req_details = self.driver.request_endpoint_details(
context.get_admin_context(),
request={'device': 'tap%s' % 'randomid', 'host': 'h1',
'timestamp': 0, 'request_id': 'request_id'})
# device was not found
self.assertTrue('port_id' not in details)
self.assertEqual(details, req_details['gbp_details'])
self.assertTrue('port_id' not in req_details['neutron_details'])
ptg = self.create_policy_target_group()['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
self.driver._get_owned_addresses = mock.Mock(side_effect=Exception)
details = self.driver.get_gbp_details(
context.get_admin_context(), device='tap%s' % pt1['port_id'],
host='h1')
req_details = self.driver.request_endpoint_details(
context.get_admin_context(),
request={'device': 'tap%s' % pt1['port_id'], 'host': 'h1',
'timestamp': 0, 'request_id': 'request_id'})
# An exception occurred
self.assertEqual({'device': 'tap%s' % pt1['port_id']}, details)
self.assertIsNone(req_details)
def test_get_gbp_proxy_details(self):
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg_fake = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
        # The PT below will actually be bound for a VM
pt_bound = self.create_policy_target(
policy_target_group_id=ptg_fake['id'])['policy_target']
l3p_real = self.create_l3_policy(name='myl3')['l3_policy']
l2p_real = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_real['id'])['l2_policy']
ptg_real = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_real['id'])['policy_target_group']
# The PT below will never be bound
pt_unbound = self.create_policy_target(
policy_target_group_id=ptg_real['id'])['policy_target']
        # Change description to link the ports. The bound one will point
# to the unbound one to get its info overridden
self.update_policy_target(
pt_bound['id'],
description=amap.PROXY_PORT_PREFIX + pt_unbound['port_id'])
port_unbound = self._get_object('ports', pt_unbound['port_id'],
self.api)['port']
# Bind the first port
self._bind_port_to_host(pt_bound['port_id'], 'h1')
# Get info on bound port
mapping = self.driver.get_gbp_details(context.get_admin_context(),
device='tap%s' % pt_bound['port_id'], host='h1')
# Bound port info
self.assertEqual(pt_bound['port_id'], mapping['port_id'])
self.assertEqual('tap%s' % pt_bound['port_id'], mapping['device'])
# APIC info are from the unbound port
self.assertEqual(ptg_real['id'], mapping['endpoint_group_name'])
self.assertEqual(l3p_real['tenant_id'], mapping['vrf_tenant'])
self.assertEqual(l3p_real['id'], mapping['vrf_name'])
self.assertEqual(port_unbound['fixed_ips'], mapping['fixed_ips'])
def test_get_gbp_details_shadow(self):
l2p = self.create_l2_policy()['l2_policy']
network = self._get_object('networks', l2p['network_id'], self.api)
with self.subnet(network=network) as sub:
with self.port(subnet=sub) as port:
self._bind_port_to_host(port['port']['id'], 'h1')
mapping = self.driver.get_gbp_details(
context.get_admin_context(),
device='tap%s' % port['port']['id'], host='h1')
self.assertEqual(port['port']['id'], mapping['port_id'])
self.assertEqual(amap.SHADOW_PREFIX + l2p['id'],
mapping['endpoint_group_name'])
def test_explicit_port(self):
with self.network() as net:
with self.subnet(network=net) as sub:
l2p = self.create_l2_policy(
network_id=net['network']['id'])['l2_policy']
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
if self.non_apic_network:
sub = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg), self.api)
with self.port(subnet=sub) as port:
self._bind_port_to_host(port['port']['id'], 'h1')
self.create_policy_target(
port_id=port['port']['id'],
policy_target_group_id=ptg['id'])
self.assertTrue(self.driver.notifier.port_update.called)
def test_port_update_changed_ptg(self):
ptg = self.create_policy_target_group()['policy_target_group']
ptg2 = self.create_policy_target_group(
l2_policy_id=ptg['l2_policy_id'])['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt['port_id'], 'h1')
if not self.non_apic_network:
self.driver.notifier.port_update.reset_mock()
self.update_policy_target(pt['id'],
policy_target_group_id=ptg2['id'])
self.assertTrue(self.driver.notifier.port_update.called)
else:
res = self.update_policy_target(pt['id'],
policy_target_group_id=ptg2['id'],
expected_res_status=400)
self.assertEqual('PTGChangeDisallowedWithNonOpFlexNetwork',
res['NeutronError']['type'])
def test_update_ptg_failed(self):
ptg = self.create_policy_target_group()['policy_target_group']
ptg2 = self.create_policy_target_group()['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
res = self.update_policy_target(
pt['id'], policy_target_group_id=ptg2['id'],
expected_res_status=400)
exp = ('PTGChangeDisallowedWithNonOpFlexNetwork'
if self.non_apic_network else 'InvalidPortForPTG')
self.assertEqual(exp, res['NeutronError']['type'])
def test_port_notified_on_subnet_change(self):
ptg = self.create_policy_target_group()['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt['port_id'], 'h1')
subnet = self._get_object('subnets', ptg['subnets'][0], self.api)
subnet2 = copy.deepcopy(subnet)
subnet2['subnet']['gateway_ip'] = '10.0.0.254'
subnet2['subnet']['allocation_pools'] = [{
'start': '10.0.0.2', 'end': '10.0.0.250'}]
self.driver.apic_manager.reset_mock()
self.driver.notifier.port_update.reset_mock()
self.driver.process_subnet_changed(context.get_admin_context(),
subnet['subnet'], subnet2['subnet'])
self.assertTrue(self.driver.notifier.port_update.called)
def test_get_gbp_proxy_address_ownership(self):
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg_fake = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
        # The PTs below will actually be bound for VMs. They are in the
        # same network.
pt_bound_1 = self.create_policy_target(
policy_target_group_id=ptg_fake['id'])['policy_target']
pt_bound_2 = self.create_policy_target(
policy_target_group_id=ptg_fake['id'])['policy_target']
pt_bound_3 = self.create_policy_target(
policy_target_group_id=ptg_fake['id'])['policy_target']
l3p_real = self.create_l3_policy(name='myl3')['l3_policy']
# Build 2 L2Ps in order to get 2 networks.
l2p_real_1 = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_real['id'])['l2_policy']
l2p_real_2 = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_real['id'])['l2_policy']
ptg_real_1 = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_real_1['id'])['policy_target_group']
ptg_real_2 = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_real_2['id'])['policy_target_group']
# The PTs below will never be bound. They are on different networks
pt_unbound_1 = self.create_policy_target(
policy_target_group_id=ptg_real_1['id'])['policy_target']
pt_unbound_2 = self.create_policy_target(
policy_target_group_id=ptg_real_2['id'])['policy_target']
pt_unbound_2_1 = self.create_policy_target(
policy_target_group_id=ptg_real_2['id'])['policy_target']
# Change description to link the ports. The bound one will point
# to the unbound one to get its info overridden
self.update_policy_target(
pt_bound_1['id'],
description=amap.PROXY_PORT_PREFIX + pt_unbound_1['port_id'])
self.update_policy_target(
pt_bound_2['id'],
description=amap.PROXY_PORT_PREFIX + pt_unbound_2['port_id'])
self.update_policy_target(
pt_bound_3['id'],
description=amap.PROXY_PORT_PREFIX + pt_unbound_2_1['port_id'])
# Set up address ownership on the bound ports, and verify that both
        # entries exist
        # Set ownership on the first bound port
self.driver.update_ip_owner({'port': pt_bound_1['port_id'],
'ip_address_v4': '1.1.1.1'})
# Same address owned by another port in a different subnet
self.driver.update_ip_owner({'port': pt_bound_2['port_id'],
'ip_address_v4': '1.1.1.1'})
# There are 2 ownership entries for the same address
entries = self.driver.ha_ip_handler.session.query(
ha_ip_db.HAIPAddressToPortAssocation).all()
self.assertEqual(2, len(entries))
self.assertEqual('1.1.1.1', entries[0].ha_ip_address)
self.assertEqual('1.1.1.1', entries[1].ha_ip_address)
self.driver.update_ip_owner({'port': pt_bound_3['port_id'],
'ip_address_v4': '1.1.1.1'})
entries = self.driver.ha_ip_handler.session.query(
ha_ip_db.HAIPAddressToPortAssocation).all()
self.assertEqual(2, len(entries))
self.assertEqual('1.1.1.1', entries[0].ha_ip_address)
self.assertEqual('1.1.1.1', entries[1].ha_ip_address)
class TestPolicyTargetVlanNetwork(ApicMappingVlanTestCase,
TestPolicyTarget):
def test_shadow_port(self):
ptg1 = self.create_policy_target_group(
name="ptg1")['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg1['id'])['policy_target']
shadow_port = self._get_object('ports', pt1['port_id'],
self.api)['port']
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
ports = self._list_resource('ports',
self.api, network_id=subnet['subnet']['network_id'])['ports']
self.assertEqual(1, len(ports))
self.assertEqual(shadow_port['mac_address'], ports[0]['mac_address'])
self.assertEqual(len(shadow_port['fixed_ips']),
len(ports[0]['fixed_ips']))
self.assertEqual(shadow_port['fixed_ips'][0]['ip_address'],
ports[0]['fixed_ips'][0]['ip_address'])
self.delete_policy_target(pt1['id'])
self._get_object('ports', pt1['port_id'], self.api,
expected_res_status=404)
self._get_object('ports', ports[0]['id'], self.api,
expected_res_status=404)
def test_shadow_port_for_explicit_port(self):
ptg1 = self.create_policy_target_group()['policy_target_group']
shadow_subnet1 = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg1),
self.api)
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
with self.port(subnet=shadow_subnet1) as p:
port1 = p['port']
pt1 = self.create_policy_target(policy_target_group_id=ptg1['id'],
port_id=port1['id'])['policy_target']
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
ports = self._list_resource('ports',
self.api, network_id=subnet['subnet']['network_id'])['ports']
self.assertEqual(1, len(ports))
self.assertEqual(port1['mac_address'], ports[0]['mac_address'])
self.assertEqual(len(port1['fixed_ips']),
len(ports[0]['fixed_ips']))
self.assertEqual(port1['fixed_ips'][0]['ip_address'],
ports[0]['fixed_ips'][0]['ip_address'])
self.delete_policy_target(pt1['id'])
self._get_object('ports', pt1['port_id'], self.api,
expected_res_status=200)
self._get_object('ports', ports[0]['id'], self.api,
expected_res_status=404)
def test_explicit_port_wrong_network(self):
ptg1 = self.create_policy_target_group()['policy_target_group']
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
with self.port(subnet=subnet) as port1:
res = self.create_policy_target(policy_target_group_id=ptg1['id'],
port_id=port1['port']['id'], expected_res_status=400)
self.assertEqual('ExplicitPortInWrongNetwork',
res['NeutronError']['type'])
def test_explicit_port_overlap_address(self):
ptg1 = self.create_policy_target_group(
name="ptg1")['policy_target_group']
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
shadow_subnet1 = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg1),
self.api)
with self.port(subnet=shadow_subnet1) as p:
shadow_port1 = p
ips = shadow_port1['port']['fixed_ips']
ips[0].pop('subnet_id', None)
with self.port(subnet=subnet, fixed_ips=ips) as p:
res = self.create_policy_target(
policy_target_group_id=ptg1['id'],
port_id=shadow_port1['port']['id'], expected_res_status=400)
self.assertEqual('ExplicitPortOverlap',
res['NeutronError']['type'])
res = self.new_delete_request('ports', p['port']['id'],
self.fmt).get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
with self.port(subnet=subnet,
mac_address=shadow_port1['port']['mac_address']) as p:
res = self.create_policy_target(
policy_target_group_id=ptg1['id'],
port_id=shadow_port1['port']['id'], expected_res_status=400)
self.assertEqual('ExplicitPortOverlap',
res['NeutronError']['type'])
def test_path_static_binding_implicit_port(self):
mgr = self.driver.apic_manager
ptg1 = self.create_policy_target_group(
name="ptg1")['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg1['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
port_ctx = self.driver._core_plugin.get_bound_port_context(
context.get_admin_context(), pt1['port_id'])
seg_id = port_ctx.bottom_bound_segment['segmentation_id']
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h1', seg_id,
bd_name=ptg1['l2_policy_id'])
# move port to different host
mgr.ensure_path_created_for_port.reset_mock()
self._bind_port_to_host(pt1['port_id'], 'h2')
mgr.ensure_path_deleted_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h1')
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2', seg_id,
bd_name=ptg1['l2_policy_id'])
# create another PT, bind to same host and then delete it
mgr.ensure_path_created_for_port.reset_mock()
mgr.ensure_path_deleted_for_port.reset_mock()
pt2 = self.create_policy_target(
policy_target_group_id=ptg1['id'])['policy_target']
self._bind_port_to_host(pt2['port_id'], 'h2')
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2', seg_id,
bd_name=ptg1['l2_policy_id'])
self.delete_policy_target(pt2['id'])
mgr.ensure_path_deleted_for_port.assert_not_called()
# delete PT
mgr.ensure_path_deleted_for_port.reset_mock()
self.delete_policy_target(pt1['id'])
mgr.ensure_path_deleted_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2')
def test_path_static_binding_explicit_port(self):
mgr = self.driver.apic_manager
ptg1 = self.create_policy_target_group(
name="ptg1")['policy_target_group']
shadow_subnet1 = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg1),
self.api)
with self.port(subnet=shadow_subnet1) as port:
port1 = port
port1 = self._bind_port_to_host(port1['port']['id'], 'h1')
port_ctx = self.driver._core_plugin.get_bound_port_context(
context.get_admin_context(), port1['port']['id'])
seg_id = port_ctx.bottom_bound_segment['segmentation_id']
mgr.ensure_path_created_for_port.assert_not_called()
# Assign port to a PT
pt1 = self.create_policy_target(
policy_target_group_id=ptg1['id'],
port_id=port1['port']['id'])['policy_target']
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h1', seg_id,
bd_name=ptg1['l2_policy_id'])
# move port to different host
mgr.ensure_path_created_for_port.reset_mock()
self._bind_port_to_host(pt1['port_id'], 'h2')
mgr.ensure_path_deleted_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h1')
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2', seg_id,
bd_name=ptg1['l2_policy_id'])
# create another port & PT, bind to same host and then delete port
mgr.ensure_path_created_for_port.reset_mock()
mgr.ensure_path_deleted_for_port.reset_mock()
with self.port(subnet=shadow_subnet1) as port:
port2 = port
pt2 = self.create_policy_target(
policy_target_group_id=ptg1['id'],
port_id=port2['port']['id'])['policy_target']
self._bind_port_to_host(pt2['port_id'], 'h2')
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2', seg_id,
bd_name=ptg1['l2_policy_id'])
res = self.new_delete_request('ports', port2['port']['id'],
self.fmt).get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
mgr.ensure_path_deleted_for_port.assert_not_called()
# Delete PT
mgr.ensure_path_deleted_for_port.reset_mock()
self.delete_policy_target(pt1['id'])
mgr.ensure_path_deleted_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2')
def test_path_static_binding_for_non_pt(self):
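        # Ports that are not policy targets get their paths under the
        # shadow ("Shd-") EPG of the L2 policy instead of a PTG EPG.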
mgr = self.driver.apic_manager
ptg1 = self.create_policy_target_group(
name="ptg1")['policy_target_group']
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
with self.port(subnet=subnet) as port:
port1 = port
with self.port(subnet=subnet) as port:
port2 = port
# bind first port
port1 = self._bind_port_to_host(port1['port']['id'], 'h1')
port_ctx = self.driver._core_plugin.get_bound_port_context(
context.get_admin_context(), port1['port']['id'])
seg_id = port_ctx.bottom_bound_segment['segmentation_id']
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], 'Shd-%s' % ptg1['l2_policy_id'], 'h1',
seg_id, bd_name=ptg1['l2_policy_id'])
# bind second port
mgr.ensure_path_created_for_port.reset_mock()
port2 = self._bind_port_to_host(port2['port']['id'], 'h1')
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], 'Shd-%s' % ptg1['l2_policy_id'], 'h1',
seg_id, bd_name=ptg1['l2_policy_id'])
# delete second port
res = self.new_delete_request('ports', port2['port']['id'],
self.fmt).get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
mgr.ensure_path_deleted_for_port.assert_not_called()
# delete first port
mgr.ensure_path_deleted_for_port.reset_mock()
res = self.new_delete_request('ports', port1['port']['id'],
self.fmt).get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
mgr.ensure_path_deleted_for_port.assert_called_once_with(
ptg1['tenant_id'], 'Shd-%s' % ptg1['l2_policy_id'], 'h1')
class FakeNetworkContext(object):
"""To generate network context for testing purposes only."""
def __init__(self, network, segments):
self._network = network
self._segments = segments
self._plugin_context = mock.Mock()
@property
def current(self):
return self._network
@property
def network_segments(self):
return self._segments
class FakePortContext(object):
"""To generate port context for testing purposes only."""
def __init__(self, port, network):
self._port = port
self._network = network
self._plugin = mock.Mock()
self._plugin_context = mock.Mock()
self._plugin.get_ports.return_value = []
if network.network_segments:
self._bound_segment = network.network_segments[0]
else:
self._bound_segment = None
self.current = self._port
self.original = self._port
self.network = self._network
self.top_bound_segment = self._bound_segment
self.bottom_bound_segment = self._bound_segment
self.host = self._port.get(portbindings.HOST_ID)
self.original_host = None
self._binding = mock.Mock()
self._binding.segment = self._bound_segment
def set_binding(self, segment_id, vif_type, cap_port_filter):
pass
class TestPolicyTargetDvs(ApicMappingTestCase):
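    """Tests for DVS port binding and dvs_notifier callbacks."""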
def setUp(self):
super(TestPolicyTargetDvs, self).setUp()
self.driver.apic_manager.app_profile_name = mocked.APIC_AP
plugin = manager.NeutronManager.get_plugin()
self.ml2 = plugin.mechanism_manager.mech_drivers['apic_gbp'].obj
self.ml2._dvs_notifier = mock.MagicMock()
self.ml2.dvs_notifier.bind_port_call = mock.Mock(
return_value=BOOKED_PORT_VALUE)
mapper = self.driver.name_mapper
        mapper.name_mapper.policy_target_group.return_value = 'ptg1'
def _verify_dvs_notifier(self, notifier, port, host):
        # getattr() works fine on mocks, so there is no need for eval().
        dvs_mock = getattr(self.ml2.dvs_notifier, notifier, None)
        if dvs_mock is None:
            self.fail("The method " + notifier + " was not called")
self.assertTrue(dvs_mock.called)
a1, a2, a3, a4 = dvs_mock.call_args[0]
self.assertEqual(a1['id'], port['id'])
self.assertEqual(a2['id'], port['id'])
self.assertEqual(a4, host)
def _pg_name(self, project, profile, network):
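        # DVS port-group names follow "<project>|<app profile>|<network>".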
return (str(project) + '|' + str(profile) + '|' + network)
def test_bind_port_dvs(self):
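        # A port bound on a DVS host must carry the port-group name and the
        # booked port key in its vif_details, and must fire the notifier.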
self.agent_conf = AGENT_CONF_DVS
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp1 = self._bind_port_to_host(pt['port_id'], 'h1')
vif_details = newp1['port']['binding:vif_details']
self.assertIsNotNone(vif_details.get('dvs_port_group_name'))
pg = self._pg_name(ptg['tenant_id'], mocked.APIC_AP, ptg['name'])
self.assertEqual(pg, vif_details.get('dvs_port_group_name'))
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNotNone(port_key)
self.assertEqual(port_key, BOOKED_PORT_VALUE)
self._verify_dvs_notifier('update_postcommit_port_call',
newp1['port'], 'h1')
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
self._verify_dvs_notifier('delete_port_call', newp1['port'], 'h1')
def test_bind_port_dvs_with_opflex_different_hosts(self):
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
self.agent_conf = AGENT_CONF
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp2 = self._bind_port_to_host(pt2['port_id'], 'h2')
vif_details = newp2['port']['binding:vif_details']
self.assertIsNone(vif_details.get('dvs_port_group_name'))
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self.agent_conf = AGENT_CONF_DVS
self.ml2._dvs_notifier.reset_mock()
newp1 = self._bind_port_to_host(pt1['port_id'], 'h2')
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNotNone(port_key)
self.assertEqual(port_key, BOOKED_PORT_VALUE)
vif_details = newp1['port']['binding:vif_details']
self.assertIsNotNone(vif_details.get('dvs_port_group_name'))
pg = self._pg_name(ptg['tenant_id'], mocked.APIC_AP, ptg['name'])
self.assertEqual(pg, vif_details.get('dvs_port_group_name'))
self._verify_dvs_notifier('update_postcommit_port_call',
newp1['port'], 'h2')
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
self._verify_dvs_notifier('delete_port_call', newp1['port'], 'h2')
def test_bind_ports_opflex_same_host(self):
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp1 = self._bind_port_to_host(pt1['port_id'], 'h1')
vif_details = newp1['port']['binding:vif_details']
self.assertIsNone(vif_details.get('dvs_port_group_name'))
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNone(port_key)
dvs_mock = self.ml2.dvs_notifier.update_postcommit_port_call
dvs_mock.assert_not_called()
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
dvs_mock = self.ml2.dvs_notifier.delete_port_call
dvs_mock.assert_not_called()
self.ml2.dvs_notifier.reset_mock()
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp2 = self._bind_port_to_host(pt2['port_id'], 'h1')
vif_details = newp2['port']['binding:vif_details']
self.assertIsNone(vif_details.get('dvs_port_group_name'))
port_key = newp2['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNone(port_key)
dvs_mock.assert_not_called()
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp2['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
dvs_mock = self.ml2.dvs_notifier.delete_port_call
dvs_mock.assert_not_called()
def test_bind_ports_dvs_with_opflex_same_host(self):
self.agent_conf = AGENT_CONF_DVS
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp1 = self._bind_port_to_host(pt1['port_id'], 'h1')
vif_details = newp1['port']['binding:vif_details']
self.assertIsNotNone(vif_details.get('dvs_port_group_name'))
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNotNone(port_key)
self.assertEqual(port_key, BOOKED_PORT_VALUE)
self._verify_dvs_notifier('update_postcommit_port_call',
newp1['port'], 'h1')
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
self._verify_dvs_notifier('delete_port_call', newp1['port'], 'h1')
self.ml2.dvs_notifier.reset_mock()
self.agent_conf = AGENT_CONF
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp2 = self._bind_dhcp_port_to_host(pt2['port_id'], 'h1')
vif_details = newp2['port']['binding:vif_details']
self.assertIsNone(vif_details.get('dvs_port_group_name'))
port_key = newp2['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNone(port_key)
dvs_mock = self.ml2.dvs_notifier.update_postcommit_port_call
dvs_mock.assert_not_called()
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp2['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
dvs_mock = self.ml2.dvs_notifier.delete_port_call
dvs_mock.assert_not_called()
def test_bind_port_dvs_shared(self):
self.agent_conf = AGENT_CONF_DVS
ptg = self.create_policy_target_group(shared=True,
name="ptg1")['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp1 = self._bind_port_to_host(pt['port_id'], 'h1')
vif_details = newp1['port']['binding:vif_details']
self.assertIsNotNone(vif_details.get('dvs_port_group_name'))
pg = self._pg_name(amap.apic_manager.TENANT_COMMON,
mocked.APIC_AP, ptg['name'])
self.assertEqual(pg, vif_details.get('dvs_port_group_name'))
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNotNone(port_key)
self.assertEqual(port_key, BOOKED_PORT_VALUE)
self._verify_dvs_notifier('update_postcommit_port_call',
newp1['port'], 'h1')
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
self._verify_dvs_notifier('delete_port_call', newp1['port'], 'h1')
class TestPolicyTargetGroup(ApicMappingTestCase):
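    """PTG lifecycle and contract tests against the mocked APIC manager."""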
def _test_policy_target_group_created_on_apic(self, shared=False):
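        # A new PTG maps to two EPGs on APIC: the PTG EPG itself and a
        # shadow EPG for its L2 policy, both on the same bridge domain.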
ptg = self.create_policy_target_group(
name="ptg1", shared=shared)['policy_target_group']
tenant = self.common_tenant if shared else ptg['tenant_id']
mgr = self.driver.apic_manager
expected_calls = [
mock.call(tenant, ptg['id'], bd_name=ptg['l2_policy_id'],
bd_owner=tenant),
mock.call(tenant, amap.SHADOW_PREFIX + ptg['l2_policy_id'],
bd_name=ptg['l2_policy_id'], bd_owner=tenant,
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.ensure_epg_created.call_args_list)
def test_policy_target_group_created_on_apic(self):
self._test_policy_target_group_created_on_apic()
def test_policy_target_group_created_on_apic_shared(self):
self._test_policy_target_group_created_on_apic(shared=True)
def _test_ptg_policy_rule_set_created(self, provider=True, shared=False):
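        # Besides the explicit policy-rule-set contract, every PTG is also
        # wired to the per-L2P service and implicit contracts.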
cntr = self.create_policy_rule_set(name='c',
shared=shared)['policy_rule_set']
l2p = self.create_l2_policy()['l2_policy']
mgr = self.driver.apic_manager
mgr.set_contract_for_epg.reset_mock()
if provider:
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'],
provided_policy_rule_sets={cntr['id']: 'scope'})[
'policy_target_group']
else:
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'],
consumed_policy_rule_sets={cntr['id']: 'scope'})[
'policy_target_group']
        # Verify that the APIC calls are issued
ct_owner = self.common_tenant if shared else cntr['tenant_id']
expected_calls = [
mock.call(
ptg['tenant_id'], ptg['id'], cntr['id'],
transaction=mock.ANY, contract_owner=ct_owner,
provider=provider),
mock.call(
ptg['tenant_id'], ptg['id'],
amap.SERVICE_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY, contract_owner=ptg['tenant_id'],
provider=False),
mock.call(
ptg['tenant_id'], ptg['id'],
amap.IMPLICIT_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY, contract_owner=ptg['tenant_id'],
provider=True),
mock.call(
ptg['tenant_id'], ptg['id'],
amap.IMPLICIT_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY, contract_owner=ptg['tenant_id'],
provider=False)]
self._check_call_list(expected_calls,
mgr.set_contract_for_epg.call_args_list)
def _test_ptg_policy_rule_set_updated(self, provider=True, shared=False):
p_or_c = {True: 'provided_policy_rule_sets',
False: 'consumed_policy_rule_sets'}
cntr = self.create_policy_rule_set(
name='c1', shared=shared)['policy_rule_set']
new_cntr = self.create_policy_rule_set(
name='c2', shared=shared)['policy_rule_set']
if provider:
ptg = self.create_policy_target_group(
provided_policy_rule_sets={cntr['id']: 'scope'})
else:
ptg = self.create_policy_target_group(
consumed_policy_rule_sets={cntr['id']: 'scope'})
data = {'policy_target_group': {p_or_c[provider]:
{new_cntr['id']: 'scope'}}}
req = self.new_update_request('policy_target_groups', data,
ptg['policy_target_group']['id'],
self.fmt)
ptg = self.deserialize(self.fmt, req.get_response(self.ext_api))
ptg = ptg['policy_target_group']
mgr = self.driver.apic_manager
ct_owner = self.common_tenant if shared else cntr['tenant_id']
mgr.set_contract_for_epg.assert_called_with(
ptg['tenant_id'], ptg['id'], new_cntr['id'],
contract_owner=ct_owner, transaction=mock.ANY,
provider=provider)
mgr.unset_contract_for_epg.assert_called_with(
ptg['tenant_id'], ptg['id'], cntr['id'],
contract_owner=ct_owner,
transaction=mock.ANY, provider=provider)
def test_ptg_policy_rule_set_provider_created(self):
self._test_ptg_policy_rule_set_created()
def test_ptg_policy_rule_set_provider_updated(self):
self._test_ptg_policy_rule_set_updated()
def test_ptg_policy_rule_set_consumer_created(self):
self._test_ptg_policy_rule_set_created(False)
def test_ptg_policy_rule_set_consumer_updated(self):
self._test_ptg_policy_rule_set_updated(False)
def test_ptg_policy_rule_set_provider_created_shared(self):
self._test_ptg_policy_rule_set_created(shared=True)
def test_ptg_policy_rule_set_provider_updated_shared(self):
self._test_ptg_policy_rule_set_updated(shared=True)
def test_ptg_policy_rule_set_consumer_created_shared(self):
self._test_ptg_policy_rule_set_created(False, shared=True)
def test_ptg_policy_rule_set_consumer_updated_shared(self):
self._test_ptg_policy_rule_set_updated(False, shared=True)
def _test_policy_target_group_deleted_on_apic(self, shared=False):
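        # Deleting the PTG removes both its EPG and the shadow L2P EPG.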
ptg = self.create_policy_target_group(
name="ptg1", shared=shared)['policy_target_group']
req = self.new_delete_request('policy_target_groups',
ptg['id'], self.fmt)
req.get_response(self.ext_api)
mgr = self.driver.apic_manager
tenant = self.common_tenant if shared else ptg['tenant_id']
expected_calls = [
mock.call(tenant, ptg['id']),
mock.call(tenant, amap.SHADOW_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY)]
self._check_call_list(expected_calls,
mgr.delete_epg_for_network.call_args_list)
def test_policy_target_group_deleted_on_apic(self):
self._test_policy_target_group_deleted_on_apic()
def test_policy_target_group_deleted_on_apic_shared(self):
self._test_policy_target_group_deleted_on_apic(shared=True)
def _test_policy_target_group_subnet_created_on_apic(self, shared=False):
ptg = self._create_explicit_subnet_ptg('10.0.0.0/24', shared=shared)
mgr = self.driver.apic_manager
tenant = self.common_tenant if shared else ptg['tenant_id']
mgr.ensure_subnet_created_on_apic.assert_called_once_with(
tenant, ptg['l2_policy_id'], '10.0.0.1/24',
transaction=mock.ANY)
def test_policy_target_group_subnet_created_on_apic(self):
self._test_policy_target_group_subnet_created_on_apic()
def test_policy_target_group_subnet_created_on_apic_shared(self):
self._test_policy_target_group_subnet_created_on_apic(shared=True)
def _test_policy_target_group_subnet_added(self, shared=False):
ptg = self._create_explicit_subnet_ptg('10.0.0.0/24', shared=shared)
l2p = self._get_object('l2_policies', ptg['l2_policy_id'],
self.ext_api)
network = self._get_object('networks', l2p['l2_policy']['network_id'],
self.api)
with self.subnet(network=network, cidr='10.0.1.0/24') as subnet:
data = {'policy_target_group':
{'subnets': ptg['subnets'] + [subnet['subnet']['id']]}}
mgr = self.driver.apic_manager
self.new_update_request('policy_target_groups', data, ptg['id'],
self.fmt).get_response(self.ext_api)
tenant = self.common_tenant if shared else ptg['tenant_id']
mgr.ensure_subnet_created_on_apic.assert_called_with(
tenant, ptg['l2_policy_id'], '10.0.1.1/24',
transaction=mock.ANY)
def test_policy_target_group_subnet_added(self):
self._test_policy_target_group_subnet_added()
def test_policy_target_group_subnet_added_shared(self):
self._test_policy_target_group_subnet_added(shared=True)
def _test_process_subnet_update(self, shared=False):
ptg = self._create_explicit_subnet_ptg('10.0.0.0/24', shared=shared)
subnet = self._get_object('subnets', ptg['subnets'][0], self.api)
subnet2 = copy.deepcopy(subnet)
subnet2['subnet']['gateway_ip'] = '10.0.0.254'
mgr = self.driver.apic_manager
mgr.reset_mock()
self.driver.process_subnet_changed(context.get_admin_context(),
subnet['subnet'], subnet2['subnet'])
tenant = self.common_tenant if shared else ptg['tenant_id']
mgr.ensure_subnet_created_on_apic.assert_called_once_with(
tenant, ptg['l2_policy_id'], '10.0.0.254/24',
transaction=mock.ANY)
mgr.ensure_subnet_deleted_on_apic.assert_called_with(
tenant, ptg['l2_policy_id'], '10.0.0.1/24',
transaction=mock.ANY)
def test_process_subnet_update(self):
self._test_process_subnet_update()
def test_process_subnet_update_shared(self):
self._test_process_subnet_update(shared=True)
def test_multiple_ptg_per_l2p(self):
l2p = self.create_l2_policy()['l2_policy']
        # Create two PTGs on the same L2P
ptg1 = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
ptg2 = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
self.assertEqual(ptg1['subnets'], ptg2['subnets'])
def test_force_add_subnet(self):
l2p = self.create_l2_policy()['l2_policy']
        # Create two PTGs on the same L2P
ptg1 = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
ptg2 = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
ctx = p_context.PolicyTargetGroupContext(
self.driver.gbp_plugin, context.get_admin_context(), ptg2)
# Emulate force add
self.driver._use_implicit_subnet(ctx, force_add=True)
        # There is now a new subnet, and it is added to both PTGs
self.assertEqual(2, len(ctx.current['subnets']))
ptg1 = self.show_policy_target_group(ptg1['id'])['policy_target_group']
self.assertEqual(2, len(ptg1['subnets']))
ptg2 = self.show_policy_target_group(ptg2['id'])['policy_target_group']
self.assertEqual(2, len(ptg2['subnets']))
self.assertEqual(set(ptg1['subnets']), set(ptg2['subnets']))
self.assertNotEqual(ptg2['subnets'][0], ptg2['subnets'][1])
def test_subnets_unique_per_l3p(self):
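        # Subnets carved out of a shared L3P for different tenants must
        # not overlap.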
l3p = self.create_l3_policy(shared=True, tenant_id='admin',
is_admin_context=True)['l3_policy']
l2p1 = self.create_l2_policy(
tenant_id='hr', l3_policy_id=l3p['id'])['l2_policy']
l2p2 = self.create_l2_policy(
tenant_id='eng', l3_policy_id=l3p['id'])['l2_policy']
ptg1 = self.create_policy_target_group(
tenant_id='hr', l2_policy_id=l2p1['id'])['policy_target_group']
ptg2 = self.create_policy_target_group(
tenant_id='eng', l2_policy_id=l2p2['id'])['policy_target_group']
sub_ptg_1 = set(self._get_object('subnets',
x, self.api)['subnet']['cidr']
for x in ptg1['subnets'])
sub_ptg_2 = set(self._get_object('subnets',
x, self.api)['subnet']['cidr']
for x in ptg2['subnets'])
self.assertNotEqual(sub_ptg_1, sub_ptg_2)
self.assertFalse(sub_ptg_1 & sub_ptg_2)
def test_preexisting_l2p_no_service_contracts(self):
# Circumvent name validation
self.driver.name_mapper.has_valid_name = (
self.driver.name_mapper._is_apic_reference)
self.driver.name_mapper.tenant = mock.Mock(
return_value=self._tenant_id)
self.driver.name_mapper.dn_manager.decompose_bridge_domain = mock.Mock(
return_value=['preexisting'])
self.driver._configure_epg_service_contract = mock.Mock()
self.driver._configure_epg_implicit_contract = mock.Mock()
l2p = self.create_l2_policy(name='apic:preexisting')['l2_policy']
self.create_policy_target_group(l2_policy_id=l2p['id'])
self.assertFalse(self.driver._configure_epg_service_contract.called)
self.assertFalse(self.driver._configure_epg_implicit_contract.called)
# Use non-preexisting L2P
self.create_policy_target_group()
self.assertTrue(self.driver._configure_epg_service_contract.called)
self.assertTrue(self.driver._configure_epg_implicit_contract.called)
def _create_explicit_subnet_ptg(self, cidr, shared=False, alloc_pool=None):
l2p = self.create_l2_policy(name="l2p", shared=shared)
l2p_id = l2p['l2_policy']['id']
network_id = l2p['l2_policy']['network_id']
network = self._get_object('networks', network_id, self.api)
pool = alloc_pool or [{'start': '10.0.0.2', 'end': '10.0.0.250'}]
with self.subnet(network=network, cidr=cidr,
allocation_pools=pool):
# The subnet creation in the proper network causes the subnet ID
# to be added to the PTG
return self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_id,
shared=shared)['policy_target_group']
class TestPolicyTargetGroupVlanNetwork(ApicMappingVlanTestCase,
TestPolicyTargetGroup):
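    """Re-run the PTG tests on VLAN networks, plus shadow-resource checks."""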
def _test_shadow_network(self, shared):
ptg1 = self.create_policy_target_group(
name='ptg1', shared=shared)['policy_target_group']
l2p = self.show_l2_policy(ptg1['l2_policy_id'])['l2_policy']
net = self._get_object('networks', l2p['network_id'],
self.api)['network']
subnet1 = self._get_object('subnets', net['subnets'][0],
self.api)['subnet']
shadow_net1 = self._get_ptg_shadow_net(ptg1)
self.assertIsNotNone(shadow_net1)
self.assertEqual(ptg1['tenant_id'], shadow_net1['tenant_id'])
self.assertEqual(shared, shadow_net1['shared'])
self.assertEqual(1, len(shadow_net1['subnets']))
shadow_subnet1 = self._get_object('subnets',
shadow_net1['subnets'][0], self.api)['subnet']
self.assertEqual(subnet1['cidr'], shadow_subnet1['cidr'])
self.assertEqual(ptg1['tenant_id'], shadow_subnet1['tenant_id'])
self.delete_policy_target_group(ptg1['id'])
self._get_object('subnets', shadow_subnet1['id'], self.api,
expected_res_status=404)
self._get_object('networks', shadow_net1['id'], self.api,
expected_res_status=404)
def test_shadow_network(self):
self._test_shadow_network(False)
def test_shadow_network_shared(self):
self._test_shadow_network(True)
def _test_shadow_subnet(self, shared):
ptg1 = self.create_policy_target_group(
name='ptg1', shared=shared)['policy_target_group']
l2p = self.show_l2_policy(ptg1['l2_policy_id'])['l2_policy']
net = self._get_object('networks', l2p['network_id'],
self.api)['network']
subnet1 = self._get_object('subnets', net['subnets'][0],
self.api)['subnet']
shadow_net1 = self._get_ptg_shadow_net(ptg1)
with self.subnet(cidr='20.0.0.0/26',
network={'network': net}) as subnet2:
subnet2 = subnet2['subnet']
shadow_subnets = self._list_resource(
'subnets', self.api, network_id=shadow_net1['id'])['subnets']
shadow_subnets = sorted(shadow_subnets, key=lambda x: x['cidr'])
self.assertEqual(2, len(shadow_subnets))
self.assertEqual(subnet1['cidr'], shadow_subnets[0]['cidr'])
self.assertEqual(subnet2['cidr'], shadow_subnets[1]['cidr'])
self.assertTrue(shadow_subnets[0]['enable_dhcp'])
self.assertTrue(shadow_subnets[1]['enable_dhcp'])
subnet1 = self._update_resource(subnet1['id'], 'subnet',
expected_res_status=200, api=self.api,
enable_dhcp=False)['subnet']
self.assertFalse(subnet1['enable_dhcp'])
shadow_subnets = self._list_resource(
'subnets', self.api, network_id=shadow_net1['id'])['subnets']
shadow_subnets = sorted(shadow_subnets, key=lambda x: x['cidr'])
self.assertFalse(shadow_subnets[0]['enable_dhcp'])
self.delete_policy_target_group(ptg1['id'])
shadow_subnets = self._list_resource('subnets', self.api,
network_id=shadow_net1['id'], expected_res_status=200)['subnets']
self.assertEqual([], shadow_subnets)
def test_shadow_subnet(self):
self._test_shadow_subnet(False)
def test_shadow_subnet_shared(self):
self._test_shadow_subnet(True)
def test_dhcp_port_disabled_in_shadow(self):
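        # DHCP ports in the shadow network are forced admin-down on create
        # and kept down even if an update tries to re-enable them.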
ptg1 = self.create_policy_target_group(
name='ptg1')['policy_target_group']
shadow_net1 = self._get_ptg_shadow_net(ptg1)
shadow_subnet1 = self._get_object('subnets',
shadow_net1['subnets'][0], self.api)
with self.port(subnet=shadow_subnet1,
device_owner='network:dhcp') as port:
port = self._get_object('ports', port['port']['id'], self.api)
self.assertFalse(port['port']['admin_state_up'])
self._update_resource(port['port']['id'], 'port',
expected_res_status=200, api=self.api,
admin_state_up=True)
port = self._get_object('ports', port['port']['id'], self.api)
self.assertFalse(port['port']['admin_state_up'])
class TestL2Policy(ApicMappingTestCase):
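    """L2 policy lifecycle tests against the mocked APIC manager."""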
def _test_l2_policy_created_on_apic(self, shared=False):
l2p = self.create_l2_policy(name="l2p", shared=shared)['l2_policy']
tenant = self.common_tenant if shared else l2p['tenant_id']
mgr = self.driver.apic_manager
mgr.ensure_bd_created_on_apic.assert_called_once_with(
tenant, l2p['id'], ctx_owner=tenant, ctx_name=l2p['l3_policy_id'],
transaction=mock.ANY)
mgr.ensure_epg_created.assert_called_once_with(
tenant, amap.SHADOW_PREFIX + l2p['id'], bd_owner=tenant,
bd_name=l2p['id'], transaction=mock.ANY)
def test_l2_policy_created_on_apic(self):
self._test_l2_policy_created_on_apic()
def test_l2_policy_created_on_apic_shared(self):
self._test_l2_policy_created_on_apic(shared=True)
def _test_l2_policy_deleted_on_apic(self, shared=False):
l2p = self.create_l2_policy(name="l2p", shared=shared)['l2_policy']
req = self.new_delete_request('l2_policies', l2p['id'], self.fmt)
req.get_response(self.ext_api)
tenant = self.common_tenant if shared else l2p['tenant_id']
mgr = self.driver.apic_manager
mgr.delete_bd_on_apic.assert_called_once_with(
tenant, l2p['id'], transaction=mock.ANY)
mgr.delete_epg_for_network.assert_called_once_with(
tenant, amap.SHADOW_PREFIX + l2p['id'],
transaction=mock.ANY)
expected_calls = [
mock.call(amap.IMPLICIT_PREFIX + l2p['id'], owner=tenant,
transaction=mock.ANY),
mock.call(amap.SERVICE_PREFIX + l2p['id'], owner=tenant,
transaction=mock.ANY)]
self._check_call_list(expected_calls,
mgr.delete_contract.call_args_list)
def test_l2_policy_deleted_on_apic(self):
self._test_l2_policy_deleted_on_apic()
def test_l2_policy_deleted_on_apic_shared(self):
self._test_l2_policy_deleted_on_apic(shared=True)
def test_pre_existing_subnets_added(self):
with self.network() as net:
with self.subnet(network=net) as sub:
sub = sub['subnet']
l2p = self.create_l2_policy(
network_id=net['network']['id'])['l2_policy']
mgr = self.driver.apic_manager
mgr.ensure_subnet_created_on_apic.assert_called_with(
l2p['tenant_id'], l2p['id'],
sub['gateway_ip'] + '/' + sub['cidr'].split('/')[1],
transaction=mock.ANY)
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
self.assertEqual(ptg['subnets'], [sub['id']])
def test_reject_l3p_update(self):
l2p = self.create_l2_policy()['l2_policy']
new_l3p = self.create_l3_policy()['l3_policy']
res = self.update_l2_policy(l2p['id'], l3_policy_id=new_l3p['id'],
expected_res_status=400)
self.assertEqual('L3PolicyUpdateOfL2PolicyNotSupported',
res['NeutronError']['type'])
def test_subnet_deallocated(self):
l2p = self.create_l2_policy()['l2_policy']
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
subnet = netaddr.IPSet(
[self._show_subnet(x)['subnet']['cidr'] for x in ptg['subnets']])
self.delete_policy_target_group(ptg['id'])
l2p2 = self.create_l2_policy()['l2_policy']
ptg = self.create_policy_target_group(
l2_policy_id=l2p2['id'])['policy_target_group']
subnet2 = netaddr.IPSet(
[self._show_subnet(x)['subnet']['cidr'] for x in ptg['subnets']])
self.assertFalse(subnet & subnet2)
class TestL3Policy(ApicMappingTestCase):
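    """L3 policy lifecycle and external-segment plugging tests."""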
def _test_l3_policy_created_on_apic(self, shared=False):
l3p = self.create_l3_policy(name="l3p", shared=shared)['l3_policy']
tenant = self.common_tenant if shared else l3p['tenant_id']
mgr = self.driver.apic_manager
mgr.ensure_context_enforced.assert_called_once_with(
tenant, l3p['id'])
def test_l3_policy_created_on_apic(self):
self._test_l3_policy_created_on_apic()
def test_l3_policy_created_on_apic_shared(self):
self._test_l3_policy_created_on_apic(shared=True)
def _test_l3_policy_deleted_on_apic(self, shared=False):
l3p = self.create_l3_policy(name="l3p", shared=shared)['l3_policy']
req = self.new_delete_request('l3_policies', l3p['id'], self.fmt)
req.get_response(self.ext_api)
tenant = self.common_tenant if shared else l3p['tenant_id']
mgr = self.driver.apic_manager
mgr.ensure_context_deleted.assert_called_once_with(
tenant, l3p['id'])
def test_l3_policy_deleted_on_apic(self):
self._test_l3_policy_deleted_on_apic()
def test_l3_policy_deleted_on_apic_shared(self):
self._test_l3_policy_deleted_on_apic(shared=True)
def _test_multiple_l3_policy_per_es(self, shared_es=False):
        # Verify that two L3Ps can share the same ES when NAT is enabled
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(name='supported',
cidr='192.168.0.0/24', shared=shared_es)['external_segment']
self.create_l3_policy(external_segments={es['id']: ['']},
expected_res_status=201)['l3_policy']
res = self.create_l3_policy(
external_segments={es['id']: ['']},
expected_res_status=201 if self.nat_enabled else 400)
if self.nat_enabled:
es = self.show_external_segment(es['id'])['external_segment']
self.assertEqual(2, len(es['l3_policies']))
else:
self.assertEqual('OnlyOneL3PolicyIsAllowedPerExternalSegment',
res['NeutronError']['type'])
        # Verify that updating an existing L3P to attach an already-used
        # ES works when NAT is enabled.
        sneaky_l3p = self.create_l3_policy()['l3_policy']
        res = self.update_l3_policy(
            sneaky_l3p['id'],
            expected_res_status=200 if self.nat_enabled else 400,
            external_segments={es['id']: ['']})
if self.nat_enabled:
es = self.show_external_segment(es['id'])['external_segment']
self.assertEqual(3, len(es['l3_policies']))
else:
self.assertEqual('OnlyOneL3PolicyIsAllowedPerExternalSegment',
res['NeutronError']['type'])
def test_multiple_l3_policy_per_es(self):
self._test_multiple_l3_policy_per_es(shared_es=False)
def test_multiple_l3_policy_per_es_shared(self):
self._test_multiple_l3_policy_per_es(shared_es=True)
def test_one_l3_policy_ip_on_es(self):
# Verify L3P created with more than 1 IP on ES fails
es = self.create_external_segment(
cidr='192.168.0.0/24')['external_segment']
res = self.create_l3_policy(
external_segments={es['id']: ['192.168.0.2', '192.168.0.3']},
expected_res_status=400)
self.assertEqual('OnlyOneAddressIsAllowedPerExternalSegment',
res['NeutronError']['type'])
# Verify L3P updated to more than 1 IP on ES fails
sneaky_l3p = self.create_l3_policy(
external_segments={es['id']: ['192.168.0.2']},
expected_res_status=201)['l3_policy']
res = self.update_l3_policy(
sneaky_l3p['id'], expected_res_status=400,
external_segments={es['id']: ['192.168.0.2', '192.168.0.3']})
self.assertEqual('OnlyOneAddressIsAllowedPerExternalSegment',
res['NeutronError']['type'])
def test_router_interface_no_gateway(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24')['external_segment']
l3p = self.create_l3_policy(
external_segments={es['id']: ['169.254.0.42']},
expected_res_status=201)['l3_policy']
l2p = self.create_l2_policy(l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
l3p = self.show_l3_policy(l3p['id'])['l3_policy']
self.assertEqual(1, len(l3p['routers']))
subnet = self._show_subnet(ptg['subnets'][0])['subnet']
router_ports = self._list(
'ports',
query_params='device_id=%s' % l3p['routers'][0])['ports']
self.assertEqual(2, len(router_ports))
for port in router_ports:
self.assertEqual(1, len(port['fixed_ips']))
self.assertNotEqual(subnet['gateway_ip'],
port['fixed_ips'][0]['ip_address'])
        # Exactly one of the two router ports is on the PTG's subnet
self.assertNotEqual(router_ports[0]['fixed_ips'][0]['subnet_id'],
router_ports[1]['fixed_ips'][0]['subnet_id'])
self.assertTrue(
router_ports[0]['fixed_ips'][0]['subnet_id'] == subnet['id'] or
router_ports[1]['fixed_ips'][0]['subnet_id'] == subnet['id'])
def _wrap_up_l3out_request(self, l3out_str, l3p_id, es_id, l3p_owner):
        # Simulate what the implementation does, so the unit test can
        # compare against the same request body.
request = {}
request['children'] = self.trimmed_l3out
request['attributes'] = {'rn': u'Shd-Sub'}
final_req = {}
final_req['l3extOut'] = request
final_req = jsonutils.dumps(final_req)
final_req = re.sub('Shd-Sub',
l3out_str % (l3p_id, es_id), final_req)
final_req = re.sub('test-tenant', l3p_owner, final_req)
final_req = re.sub('{},*', '', final_req)
return final_req
def _test_l3p_plugged_to_es_at_creation(self, shared_es,
shared_l3p, is_edge_nat=False):
# Verify L3P is correctly plugged to ES on APIC during create
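        # The expected APIC calls vary along three axes: whether NAT is
        # enabled, whether the L3Out pre-exists on APIC, and whether
        # edge-NAT mode is used (which additionally reserves a VLAN).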
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
shared=shared_es,
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}])['external_segment']
owner = self.common_tenant if shared_es else es['tenant_id']
mgr = self.driver.apic_manager
mgr.ensure_epg_created.reset_mock()
mgr.set_contract_for_epg.reset_mock()
l3p = self.create_l3_policy(
name='myl3p',
shared=shared_l3p,
tenant_id=es['tenant_id'] if not shared_es else 'another_tenant',
external_segments={es['id']: []},
expected_res_status=201)['l3_policy']
self.assertEqual(1, len(l3p['external_segments'][es['id']]))
self.assertEqual('169.254.0.2', l3p['external_segments'][es['id']][0])
expected_epg_calls = []
expected_contract_calls = []
expected_nat_epg_tenant = owner
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'], bd_owner=owner,
transaction=mock.ANY))
expected_contract_calls.extend([
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
"NAT-allow-%s" % es['id'], transaction=mock.ANY),
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
"NAT-allow-%s" % es['id'], provider=True,
transaction=mock.ANY)])
expected_nat_epg_tenant = l3p['tenant_id']
self._check_call_list(expected_epg_calls,
mgr.ensure_epg_created.call_args_list)
self._check_call_list(expected_contract_calls,
mgr.set_contract_for_epg.call_args_list)
ctx = context.get_admin_context()
ctx._plugin_context = ctx
self.assertEqual((expected_nat_epg_tenant, "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p))
l2ps = [self.create_l2_policy(name='myl2p-%s' % x,
tenant_id=l3p['tenant_id'],
shared=shared_l3p,
l3_policy_id=l3p['id'])['l2_policy']
for x in range(0, 3)]
l3p_owner = self.common_tenant if shared_l3p else l3p['tenant_id']
call_name = mgr.ensure_external_routed_network_created
l3out_str = "Shd-%s-%s"
if is_edge_nat:
l3out_str = "Auto-%s-%s"
if self.nat_enabled:
expected_l3out_calls = []
if not is_edge_nat or not self.pre_l3out:
expected_l3out_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
owner=l3p_owner, context=l3p['id'],
transaction=mock.ANY))
if not self.pre_l3out:
expected_l3out_calls.append(
mock.call(es['id'], owner=owner,
context="NAT-vrf-%s" % es['id'],
transaction=mock.ANY))
elif not self.pre_l3out:
expected_l3out_calls = [
mock.call(es['id'], owner=owner, context=l3p['id'],
transaction=mock.ANY)]
else:
call_name = mgr.set_context_for_external_routed_network
expected_l3out_calls = [
mock.call(APIC_PRE_L3OUT_TENANT, es['name'], l3p['id'],
transaction=mock.ANY)]
self._check_call_list(expected_l3out_calls, call_name.call_args_list)
if is_edge_nat and self.nat_enabled:
(self.driver.l3out_vlan_alloc.
reserve_vlan.assert_called_once_with(
es['name'], l3p['id']))
if not self.pre_l3out:
expected_set_domain_calls = [
mock.call(es['id'], owner=owner, transaction=mock.ANY)]
expected_logic_node_calls = [
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT,
mocked.APIC_EXT_ENCAP, '192.168.0.2/24',
owner=owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY)]
expected_route_calls = [
mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.254',
owner=owner, subnet='0.0.0.0/0',
transaction=mock.ANY),
mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.1',
owner=owner, subnet='128.0.0.0/16',
transaction=mock.ANY)]
if is_edge_nat and self.nat_enabled:
expected_set_domain_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
owner=l3p_owner, transaction=mock.ANY))
expected_logic_node_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE,
mocked.APIC_EXT_PORT, mock.ANY, '192.168.0.2/24',
owner=l3p_owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY))
expected_route_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, '192.168.0.254',
owner=l3p_owner, subnet='0.0.0.0/0',
transaction=mock.ANY))
expected_route_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, '192.168.0.1',
owner=l3p_owner, subnet='128.0.0.0/16',
transaction=mock.ANY))
self._check_call_list(expected_set_domain_calls,
mgr.set_domain_for_external_routed_network.call_args_list)
self._check_call_list(expected_logic_node_calls,
mgr.ensure_logical_node_profile_created.call_args_list)
self._check_call_list(expected_route_calls,
mgr.ensure_static_route_created.call_args_list)
else:
if is_edge_nat and self.nat_enabled:
final_req = self._wrap_up_l3out_request(l3out_str,
l3p['id'], es['id'],
l3p_owner)
mgr.apic.post_body.assert_called_once_with(
mgr.apic.l3extOut.mo, final_req, l3p_owner,
l3out_str % (l3p['id'], es['id']))
self.assertFalse(mgr.set_domain_for_external_routed_network.called)
self.assertFalse(mgr.ensure_logical_node_profile_created.called)
self.assertFalse(mgr.ensure_static_route_created.called)
expected_set_l3out_for_bd_calls = []
if self.nat_enabled:
expected_set_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es['id'],
es['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
if is_edge_nat:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es['id']),
transaction=mock.ANY) for l2p in l2ps])
else:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es['name' if self.pre_l3out else 'id'],
transaction=mock.ANY) for l2p in l2ps])
self._check_call_list(expected_set_l3out_for_bd_calls,
mgr.set_l3out_for_bd.call_args_list)
    # The numeric test-name suffixes are admittedly poor, but the variants
    # are kept as separate test methods so that the mocks are reset
    # between runs.
def test_l3p_plugged_to_es_at_creation_1(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=False)
def test_l3p_plugged_to_es_at_creation_2(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=True)
def test_l3p_plugged_to_es_at_creation_3(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=False,
shared_l3p=False)
def test_l3p_plugged_to_es_at_creation_edge_nat_mode_1(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_creation_edge_nat_mode_2(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=True,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_creation_edge_nat_mode_3(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=False,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_creation_ptne_1(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=False)
def test_l3p_plugged_to_es_at_creation_ptne_2(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=True)
def _test_l3p_plugged_to_es_at_update(self, shared_es,
shared_l3p, is_edge_nat=False):
# Verify L3P is correctly plugged to ES on APIC during update
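        # Same three axes as the creation test: NAT, pre-existing L3Out,
        # and edge-NAT mode.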
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
shared=shared_es,
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}])['external_segment']
l3p = self.create_l3_policy(
name='myl3p',
expected_res_status=201,
tenant_id=es['tenant_id'] if not shared_es else 'another_tenant',
shared=shared_l3p)['l3_policy']
l2ps = [self.create_l2_policy(name='myl2p-%s' % x,
tenant_id=l3p['tenant_id'],
shared=shared_l3p,
l3_policy_id=l3p['id'])['l2_policy']
for x in range(0, 3)]
mgr = self.driver.apic_manager
mgr.ensure_epg_created.reset_mock()
mgr.set_contract_for_epg.reset_mock()
# update L3P with ES
l3p = self.update_l3_policy(l3p['id'], tenant_id=l3p['tenant_id'],
external_segments={es['id']: []},
expected_res_status=200)['l3_policy']
self.assertEqual(1, len(l3p['external_segments'][es['id']]))
self.assertEqual('169.254.0.2', l3p['external_segments'][es['id']][0])
owner = self.common_tenant if shared_es else es['tenant_id']
l3p_owner = self.common_tenant if shared_l3p else l3p['tenant_id']
l3out_str = "Shd-%s-%s"
if is_edge_nat:
l3out_str = "Auto-%s-%s"
expected_l3out_calls = []
call_name = mgr.ensure_external_routed_network_created
if self.nat_enabled:
if not is_edge_nat or not self.pre_l3out:
expected_l3out_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
owner=l3p_owner, context=l3p['id'],
transaction=mock.ANY))
if not self.pre_l3out:
expected_l3out_calls.append(
mock.call(es['id'], owner=owner,
context="NAT-vrf-%s" % es['id'],
transaction=mock.ANY))
elif not self.pre_l3out:
expected_l3out_calls = [
mock.call(es['id'], owner=owner, context=l3p['id'],
transaction=mock.ANY)]
else:
call_name = mgr.set_context_for_external_routed_network
expected_l3out_calls = [
mock.call(APIC_PRE_L3OUT_TENANT, es['name'], l3p['id'],
transaction=mock.ANY)]
self._check_call_list(expected_l3out_calls, call_name.call_args_list)
if is_edge_nat and self.nat_enabled:
(self.driver.l3out_vlan_alloc.
reserve_vlan.assert_called_once_with(
es['name'], l3p['id']))
if not self.pre_l3out:
expected_set_domain_calls = [
mock.call(es['id'], owner=owner, transaction=mock.ANY)]
expected_logic_node_calls = [
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT,
mocked.APIC_EXT_ENCAP, '192.168.0.2/24',
owner=owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY)]
expected_route_calls = [
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
'192.168.0.254', owner=owner, subnet='0.0.0.0/0',
transaction=mock.ANY),
mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.1',
owner=owner, subnet='128.0.0.0/16',
transaction=mock.ANY)]
if is_edge_nat and self.nat_enabled:
expected_set_domain_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
owner=l3p_owner, transaction=mock.ANY))
expected_logic_node_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE,
mocked.APIC_EXT_PORT, mock.ANY, '192.168.0.2/24',
owner=l3p_owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY))
expected_route_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, '192.168.0.254',
owner=l3p_owner, subnet='0.0.0.0/0',
transaction=mock.ANY))
expected_route_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, '192.168.0.1',
owner=l3p_owner, subnet='128.0.0.0/16',
transaction=mock.ANY))
self._check_call_list(expected_set_domain_calls,
mgr.set_domain_for_external_routed_network.call_args_list)
self._check_call_list(expected_logic_node_calls,
mgr.ensure_logical_node_profile_created.call_args_list)
self._check_call_list(expected_route_calls,
mgr.ensure_static_route_created.call_args_list)
else:
if is_edge_nat and self.nat_enabled:
final_req = self._wrap_up_l3out_request(l3out_str,
l3p['id'], es['id'],
l3p_owner)
mgr.apic.post_body.assert_called_once_with(
mgr.apic.l3extOut.mo, final_req, l3p_owner,
l3out_str % (l3p['id'], es['id']))
self.assertFalse(mgr.set_domain_for_external_routed_network.called)
self.assertFalse(mgr.ensure_logical_node_profile_created.called)
self.assertFalse(mgr.ensure_static_route_created.called)
expected_set_l3out_for_bd_calls = []
if self.nat_enabled:
expected_set_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es['id'],
es['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
if is_edge_nat:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es['id'])
) for l2p in l2ps])
else:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es['name' if self.pre_l3out else 'id'])
for l2p in l2ps])
self._check_call_list(expected_set_l3out_for_bd_calls,
mgr.set_l3out_for_bd.call_args_list)
expected_epg_calls = []
expected_contract_calls = []
expected_nat_epg_tenant = owner
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'], bd_owner=owner,
transaction=mock.ANY))
expected_contract_calls.extend([
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
"NAT-allow-%s" % es['id'], transaction=mock.ANY),
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
"NAT-allow-%s" % es['id'], provider=True,
transaction=mock.ANY)])
expected_nat_epg_tenant = l3p['tenant_id']
self._check_call_list(expected_epg_calls,
mgr.ensure_epg_created.call_args_list)
self._check_call_list(expected_contract_calls,
mgr.set_contract_for_epg.call_args_list)
ctx = context.get_admin_context()
ctx._plugin_context = ctx
self.assertEqual((expected_nat_epg_tenant, "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p))
    # The numeric test-name suffixes are admittedly poor, but the variants
    # are kept as separate test methods so that the mocks are reset
    # between runs.
def test_l3p_plugged_to_es_at_update_1(self):
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=False)
def test_l3p_plugged_to_es_at_update_2(self):
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=True)
def test_l3p_plugged_to_es_at_update_3(self):
self._test_l3p_plugged_to_es_at_update(shared_es=False,
shared_l3p=False)
def test_l3p_plugged_to_es_at_update_edge_nat_mode_1(self):
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_update_edge_nat_mode_2(self):
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=True,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_update_edge_nat_mode_3(self):
self._test_l3p_plugged_to_es_at_update(shared_es=False,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_update_ptne_1(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=False)
def test_l3p_plugged_to_es_at_update_ptne_2(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=True)
def _test_l3p_unplugged_from_es_on_delete(self, shared_es,
shared_l3p, is_edge_nat=False):
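        # Deleting the L3P must tear down the (shadow) L3Outs, detach the
        # NAT BD from the L3Out, and release any edge-NAT VLANs.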
self._mock_external_dict([('supported1', '192.168.0.2/24'),
('supported2', '192.168.1.2/24')],
is_edge_nat)
es1 = self.create_external_segment(
name='supported1', cidr='192.168.0.0/24', shared=shared_es,
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}])['external_segment']
es2 = self.create_external_segment(
shared=shared_es, name='supported2',
cidr='192.168.1.0/24')['external_segment']
l3p = self.create_l3_policy(shared=shared_l3p,
tenant_id=es1['tenant_id'] if not shared_es else 'another_tenant',
external_segments={es1['id']: ['169.254.0.3']},
expected_res_status=201)['l3_policy']
mgr = self.driver.apic_manager
mgr.set_context_for_external_routed_network.reset_mock()
req = self.new_delete_request('l3_policies', l3p['id'], self.fmt)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
owner = self.common_tenant if shared_es else es1['tenant_id']
l3p_owner = self.common_tenant if shared_l3p else l3p['tenant_id']
expected_delete_calls = []
if not self.pre_l3out:
expected_delete_calls.append(
mock.call(es1['id'], owner=owner, transaction=mock.ANY))
if self.nat_enabled:
l3out_str = "Shd-%s-%s"
if is_edge_nat:
l3out_str = "Auto-%s-%s"
expected_delete_calls.append(
mock.call(l3out_str % (l3p['id'], es1['id']),
owner=l3p_owner, transaction=mock.ANY))
self._check_call_list(
expected_delete_calls,
mgr.delete_external_routed_network.call_args_list)
if self.nat_enabled:
mgr.unset_l3out_for_bd.assert_called_once_with(owner,
"NAT-bd-%s" % es1['id'],
es1['name' if self.pre_l3out else 'id'], transaction=mock.ANY)
if self.pre_l3out and not self.nat_enabled:
call_name = mgr.set_context_for_external_routed_network
call_name.assert_called_once_with(APIC_PRE_L3OUT_TENANT,
es1['name'], None, transaction=mock.ANY)
if is_edge_nat and self.nat_enabled:
self.driver.l3out_vlan_alloc.release_vlan.assert_called_once_with(
es1['name'], l3p['id'])
mgr.delete_external_routed_network.reset_mock()
mgr.unset_l3out_for_bd.reset_mock()
self.driver.l3out_vlan_alloc.release_vlan.reset_mock()
expected_epg_calls = []
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es1['id'],
transaction=mock.ANY))
self._check_call_list(expected_epg_calls,
mgr.delete_epg_for_network.call_args_list)
ctx = context.get_admin_context()
ctx._plugin_context = ctx
self.assertEqual((owner, "NAT-epg-%s" % es1['id']),
self.driver._determine_nat_epg_for_es(ctx, es1, l3p))
# Verify correct deletion for 2 ESs
l3p = self.create_l3_policy(
shared=shared_l3p,
tenant_id=es1['tenant_id'] if not shared_es else 'another_tenant',
external_segments={es1['id']: ['169.254.0.3'],
es2['id']: ['169.254.0.3']},
expected_res_status=201)['l3_policy']
mgr.set_context_for_external_routed_network.reset_mock()
mgr.delete_epg_for_network.reset_mock()
req = self.new_delete_request('l3_policies', l3p['id'], self.fmt)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
expected_delete_calls = []
if not self.pre_l3out:
expected_delete_calls.extend([
mock.call(es1['id'], owner=owner, transaction=mock.ANY),
mock.call(es2['id'], owner=owner, transaction=mock.ANY)])
if self.nat_enabled:
l3out_str = "Shd-%s-%s"
if is_edge_nat:
l3out_str = "Auto-%s-%s"
expected_delete_calls.extend([
mock.call(l3out_str % (l3p['id'], es1['id']),
owner=l3p_owner, transaction=mock.ANY),
mock.call(l3out_str % (l3p['id'], es2['id']),
owner=l3p_owner, transaction=mock.ANY)])
self._check_call_list(
expected_delete_calls,
mgr.delete_external_routed_network.call_args_list)
if self.nat_enabled:
expected_unset_calls = [
mock.call(owner, "NAT-bd-%s" % es1['id'],
es1['name' if self.pre_l3out else 'id'],
transaction=mock.ANY),
mock.call(owner, "NAT-bd-%s" % es2['id'],
es2['name' if self.pre_l3out else 'id'],
transaction=mock.ANY)]
self._check_call_list(
expected_unset_calls, mgr.unset_l3out_for_bd.call_args_list)
if self.pre_l3out and not self.nat_enabled:
expected_calls = [
mock.call(APIC_PRE_L3OUT_TENANT,
es1['name'], None, transaction=mock.ANY),
mock.call(APIC_PRE_L3OUT_TENANT,
es2['name'], None, transaction=mock.ANY)]
self._check_call_list(
expected_calls,
mgr.set_context_for_external_routed_network.call_args_list)
if is_edge_nat and self.nat_enabled:
expected_release_vlan_calls = [mock.call(es1['name'], l3p['id']),
mock.call(es2['name'], l3p['id'])]
self._check_call_list(
expected_release_vlan_calls,
self.driver.l3out_vlan_alloc.release_vlan.call_args_list)
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es2['id'],
transaction=mock.ANY))
self._check_call_list(expected_epg_calls,
mgr.delete_epg_for_network.call_args_list)
self.assertEqual((owner, "NAT-epg-%s" % es2['id']),
self.driver._determine_nat_epg_for_es(ctx, es2, l3p))
    # The numeric test-name suffixes are admittedly poor, but the variants
    # are kept as separate test methods so that the mocks are reset
    # between runs.
def test_l3p_unplugged_from_es_on_delete_1(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_delete_2(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=True)
def test_l3p_unplugged_from_es_on_delete_3(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=False,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_delete_edge_nat_mode_1(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_delete_edge_nat_mode_2(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=True,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_delete_edge_nat_mode_3(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=False,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_delete_ptne_1(self):
        self.driver.per_tenant_nat_epg = True
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_delete_ptne_2(self):
        self.driver.per_tenant_nat_epg = True
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=True)
def _test_l3p_unplugged_from_es_on_update(self, shared_es,
shared_l3p, is_edge_nat=False):
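        # Moving the L3P from es1 to es2 must unplug all es1 resources and
        # plug the equivalent resources into es2.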
self._mock_external_dict([('supported1', '192.168.0.2/24'),
('supported', '192.168.1.2/24')],
is_edge_nat)
es1 = self.create_external_segment(
name='supported1', cidr='192.168.0.0/24', shared=shared_es,
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}])['external_segment']
es2 = self.create_external_segment(
shared=shared_es,
name='supported', cidr='192.168.1.0/24')['external_segment']
l3p = self.create_l3_policy(
name='myl3p',
tenant_id=es1['tenant_id'] if not shared_es else 'another_tenant',
shared=shared_l3p,
external_segments={es1['id']: ['169.254.0.3']},
expected_res_status=201)['l3_policy']
l2ps = [self.create_l2_policy(name='myl2p-%s' % x,
tenant_id=l3p['tenant_id'],
shared=shared_l3p,
l3_policy_id=l3p['id'])['l2_policy']
for x in range(0, 3)]
mgr = self.driver.apic_manager
owner = self.common_tenant if shared_es else es1['tenant_id']
l3p_owner = self.common_tenant if shared_l3p else l3p['tenant_id']
mgr.ensure_external_routed_network_created.reset_mock()
mgr.set_domain_for_external_routed_network.reset_mock()
mgr.ensure_logical_node_profile_created.reset_mock()
mgr.ensure_static_route_created.reset_mock()
self.driver.l3out_vlan_alloc.reserve_vlan.reset_mock()
mgr.apic.post_body.reset_mock()
mgr.set_context_for_external_routed_network.reset_mock()
mgr.set_l3out_for_bd.reset_mock()
l3p = self.update_l3_policy(
l3p['id'], tenant_id=l3p['tenant_id'], expected_res_status=200,
external_segments={es2['id']: ['169.254.0.4']})['l3_policy']
l3out_str = "Shd-%s-%s"
if is_edge_nat:
l3out_str = "Auto-%s-%s"
expected_delete_calls = []
if not self.pre_l3out:
expected_delete_calls.append(
mock.call(es1['id'], owner=owner, transaction=mock.ANY))
if self.nat_enabled:
expected_delete_calls.append(
mock.call(l3out_str % (l3p['id'], es1['id']),
owner=l3p_owner, transaction=mock.ANY))
self._check_call_list(
expected_delete_calls,
mgr.delete_external_routed_network.call_args_list)
if self.pre_l3out and not self.nat_enabled:
expected_calls = [
mock.call(APIC_PRE_L3OUT_TENANT,
es1['name'], None, transaction=mock.ANY),
mock.call(APIC_PRE_L3OUT_TENANT,
es2['name'], l3p['id'], transaction=mock.ANY)]
self._check_call_list(
expected_calls,
mgr.set_context_for_external_routed_network.call_args_list)
expected_unset_l3out_for_bd_calls = []
if self.nat_enabled:
if is_edge_nat:
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es1['id'])
) for l2p in l2ps])
expected_unset_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es1['id'],
es1['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
else:
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es1['name' if self.pre_l3out else 'id'])
for l2p in l2ps])
self._check_call_list(expected_unset_l3out_for_bd_calls,
mgr.unset_l3out_for_bd.call_args_list)
if is_edge_nat and self.nat_enabled:
self.driver.l3out_vlan_alloc.release_vlan.assert_called_once_with(
es1['name'], l3p['id'])
expected_l3out_calls = []
if self.nat_enabled:
if not is_edge_nat or not self.pre_l3out:
expected_l3out_calls.append(
mock.call(l3out_str % (l3p['id'], es2['id']),
owner=l3p_owner, context=l3p['id'],
transaction=mock.ANY))
if not self.pre_l3out:
expected_l3out_calls.append(
mock.call(es2['id'], owner=owner,
context="NAT-vrf-%s" % es2['id'],
transaction=mock.ANY))
elif not self.pre_l3out:
expected_l3out_calls = [
mock.call(es2['id'], owner=owner, context=l3p['id'],
transaction=mock.ANY)]
self._check_call_list(expected_l3out_calls,
mgr.ensure_external_routed_network_created.call_args_list)
if is_edge_nat and self.nat_enabled:
(self.driver.l3out_vlan_alloc.
reserve_vlan.assert_called_once_with(
es2['name'], l3p['id']))
if not self.pre_l3out:
expected_set_domain_calls = [
mock.call(es2['id'], owner=owner, transaction=mock.ANY)]
expected_logic_node_calls = [
mock.call(es2['id'], mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT,
mocked.APIC_EXT_ENCAP, '192.168.1.2/24',
owner=owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY)]
if is_edge_nat and self.nat_enabled:
expected_set_domain_calls.append(
mock.call(l3out_str % (l3p['id'], es2['id']),
owner=l3p_owner, transaction=mock.ANY))
expected_logic_node_calls.append(
mock.call(l3out_str % (l3p['id'], es2['id']),
mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE,
mocked.APIC_EXT_PORT, mock.ANY, '192.168.1.2/24',
owner=l3p_owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY))
self._check_call_list(expected_set_domain_calls,
mgr.set_domain_for_external_routed_network.call_args_list)
self._check_call_list(expected_logic_node_calls,
mgr.ensure_logical_node_profile_created.call_args_list)
else:
if is_edge_nat and self.nat_enabled:
final_req = self._wrap_up_l3out_request(l3out_str,
l3p['id'], es2['id'],
l3p_owner)
mgr.apic.post_body.assert_called_once_with(
mgr.apic.l3extOut.mo, final_req, l3p_owner,
l3out_str % (l3p['id'], es2['id']))
self.assertFalse(mgr.set_domain_for_external_routed_network.called)
self.assertFalse(mgr.ensure_logical_node_profile_created.called)
self.assertFalse(mgr.ensure_static_route_created.called)
expected_set_l3out_for_bd_calls = []
if self.nat_enabled:
expected_set_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es2['id'],
es2['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
if is_edge_nat:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es2['id'])
) for l2p in l2ps])
else:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es2['name' if self.pre_l3out else 'id'])
for l2p in l2ps])
self._check_call_list(expected_set_l3out_for_bd_calls,
mgr.set_l3out_for_bd.call_args_list)
expected_epg_calls = []
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es1['id'],
transaction=mock.ANY))
self._check_call_list(expected_epg_calls,
mgr.delete_epg_for_network.call_args_list)
ctx = context.get_admin_context()
ctx._plugin_context = ctx
self.assertEqual((owner, "NAT-epg-%s" % es1['id']),
self.driver._determine_nat_epg_for_es(ctx, es1, l3p))
self.driver.l3out_vlan_alloc.release_vlan.reset_mock()
mgr.delete_external_routed_network.reset_mock()
mgr.unset_l3out_for_bd.reset_mock()
self.update_l3_policy(
l3p['id'], expected_res_status=200, tenant_id=l3p['tenant_id'],
external_segments={es1['id']: ['169.254.0.5'],
es2['id']: ['169.254.0.6']})
mgr.set_context_for_external_routed_network.reset_mock()
mgr.delete_epg_for_network.reset_mock()
self.update_l3_policy(
l3p['id'], tenant_id=l3p['tenant_id'],
expected_res_status=200, external_segments={})
expected_delete_calls = []
if not self.pre_l3out:
expected_delete_calls.extend([
mock.call(es1['id'], owner=owner, transaction=mock.ANY),
mock.call(es2['id'], owner=owner, transaction=mock.ANY)])
if self.nat_enabled:
expected_delete_calls.extend([
mock.call(l3out_str % (l3p['id'], es1['id']),
owner=l3p_owner, transaction=mock.ANY),
mock.call(l3out_str % (l3p['id'], es2['id']),
owner=l3p_owner, transaction=mock.ANY)])
self._check_call_list(
expected_delete_calls,
mgr.delete_external_routed_network.call_args_list)
expected_unset_l3out_for_bd_calls = []
if self.nat_enabled:
if is_edge_nat:
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es1['id'])
) for l2p in l2ps])
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es2['id'])
) for l2p in l2ps])
expected_unset_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es1['id'],
es1['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
expected_unset_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es2['id'],
es2['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
else:
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es1['name' if self.pre_l3out else 'id'])
for l2p in l2ps])
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es2['name' if self.pre_l3out else 'id'])
for l2p in l2ps])
self._check_call_list(expected_unset_l3out_for_bd_calls,
mgr.unset_l3out_for_bd.call_args_list)
if self.pre_l3out and not self.nat_enabled:
expected_calls = [
mock.call(APIC_PRE_L3OUT_TENANT,
es1['name'], None, transaction=mock.ANY),
mock.call(APIC_PRE_L3OUT_TENANT,
es2['name'], None, transaction=mock.ANY)]
self._check_call_list(
expected_calls,
mgr.set_context_for_external_routed_network.call_args_list)
if is_edge_nat and self.nat_enabled:
expected_release_vlan_calls = [mock.call(es1['name'], l3p['id']),
mock.call(es2['name'], l3p['id'])]
self._check_call_list(
expected_release_vlan_calls,
self.driver.l3out_vlan_alloc.release_vlan.call_args_list)
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es2['id'],
transaction=mock.ANY))
self._check_call_list(expected_epg_calls,
mgr.delete_epg_for_network.call_args_list)
self.assertEqual((owner, "NAT-epg-%s" % es2['id']),
self.driver._determine_nat_epg_for_es(ctx, es2, l3p))
    # Although the naming convention used here has been chosen poorly,
    # I'm separating the tests in order to get the mocks reset.
def test_l3p_unplugged_from_es_on_update_1(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_update_2(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=True)
def test_l3p_unplugged_from_es_on_update_3(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=False,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_update_edge_nat_mode_1(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_update_edge_nat_mode_2(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=True,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_update_edge_nat_mode_3(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=False,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_update_ptne_1(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_update_ptne_2(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=True)
def test_verify_unsupported_es_noop(self):
        # Verify that plugging an L3P into an unsupported ES is a no-op
        # on APIC
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='unsupported', cidr='192.168.0.0/24')['external_segment']
self.create_l3_policy(
external_segments={es['id']: ['192.168.0.3']},
expected_res_status=201)
mgr = self.driver.apic_manager
self.assertFalse(mgr.ensure_external_routed_network_created.called)
self.assertFalse(mgr.set_domain_for_external_routed_network.called)
self.assertFalse(mgr.ensure_logical_node_profile_created.called)
self.assertFalse(mgr.ensure_static_route_created.called)
def test_l3p_external_address(self):
        # Verify an auto-allocated IP address is assigned to the L3P when
        # no explicit address is configured
self._mock_external_dict([('supported1', '192.168.0.2/24'),
('supported2', '192.168.1.2/24')])
es1 = self.create_external_segment(
name='supported1', cidr='192.168.0.0/24')['external_segment']
es2 = self.create_external_segment(
name='supported2', cidr='192.168.1.0/24')['external_segment']
l3p = self.create_l3_policy(
external_segments={es1['id']: []},
expected_res_status=201)['l3_policy']
self.assertEqual(['169.254.0.2'], l3p['external_segments'][es1['id']])
l3p = self.update_l3_policy(
l3p['id'], expected_res_status=200,
external_segments={es1['id']: [], es2['id']: []})['l3_policy']
self.assertEqual(['169.254.0.2'], l3p['external_segments'][es1['id']])
self.assertEqual(['169.254.0.2'], l3p['external_segments'][es2['id']])
        # Explicitly change the IP address
l3p = self.update_l3_policy(
l3p['id'], expected_res_status=200,
external_segments={es1['id']: ['169.254.0.3'],
es2['id']: []})['l3_policy']
self.assertEqual(['169.254.0.3'], l3p['external_segments'][es1['id']])
self.assertEqual(['169.254.0.2'], l3p['external_segments'][es2['id']])
def _test_multi_es_with_ptg(self, shared_es):
self._mock_external_dict([('supported1', '192.168.0.2/24'),
('supported2', '192.168.1.2/24')])
es1 = self.create_external_segment(shared=shared_es,
name='supported1', cidr='192.168.0.0/24')['external_segment']
es2 = self.create_external_segment(shared=shared_es,
name='supported2', cidr='192.168.1.0/24')['external_segment']
l3p = self.create_l3_policy(
external_segments={es1['id']: [], es2['id']: []},
expected_res_status=201)['l3_policy']
l2p = self.create_l2_policy(l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(name="ptg",
l2_policy_id=l2p['id'],
expected_res_status=201)['policy_target_group']
res = self.new_delete_request('policy_target_groups', ptg['id'],
self.fmt).get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
def test_multi_es_with_ptg_1(self):
self._test_multi_es_with_ptg(False)
def test_multi_es_with_ptg_2(self):
self._test_multi_es_with_ptg(True)
def test_multi_l3p_ptne(self):
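        # With per-tenant NAT EPG, the EPG is created once for the first
        # L3P of a tenant and deleted only when its last L3P goes away.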
self.driver.per_tenant_nat_epg = True
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', shared=True)['external_segment']
mgr = self.driver.apic_manager
mgr.ensure_epg_created.reset_mock()
l3ps = []
for x in range(0, 3 if self.nat_enabled else 1):
l3ps.append(self.create_l3_policy(
name='myl3p-%s' % x, tenant_id='another_tenant',
external_segments={es['id']: []},
expected_res_status=201)['l3_policy'])
if self.nat_enabled:
mgr.ensure_epg_created.assert_called_once_with(
'another_tenant', "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'],
bd_owner=self.common_tenant, transaction=mock.ANY)
else:
mgr.ensure_epg_created.assert_not_called()
for l3p in l3ps[:-1]:
self.delete_l3_policy(l3p['id'], tenant_id=l3p['tenant_id'])
mgr.delete_epg_for_network.assert_not_called()
self.delete_l3_policy(l3ps[-1]['id'], tenant_id=l3ps[-1]['tenant_id'])
if self.nat_enabled:
mgr.delete_epg_for_network.assert_called_once_with(
'another_tenant', "NAT-epg-%s" % es['id'],
transaction=mock.ANY)
else:
mgr.delete_epg_for_network.assert_not_called()
def test_ptne_upgrade(self):
        # Simulate an "upgrade" - tenants existing before the upgrade
        # should continue using the non-specific NAT EPG, whereas new
        # ones use tenant-specific NAT EPGs
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', shared=True)['external_segment']
mgr = self.driver.apic_manager
mgr.ensure_epg_created.reset_mock()
ctx = context.get_admin_context()
ctx._plugin_context = ctx
l3p_a_1 = self.create_l3_policy(
name='myl3p-a-1', tenant_id='tenant_a',
external_segments={es['id']: []})['l3_policy']
mgr.ensure_epg_created.assert_not_called()
self.assertEqual((self.common_tenant, "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p_a_1))
# "Upgrade" and change to per-tenant NAT EPG
self.driver.per_tenant_nat_epg = True
if self.nat_enabled:
l3p_a_2 = self.create_l3_policy(
name='myl3p-a-2', tenant_id='tenant_a',
external_segments={es['id']: []})['l3_policy']
mgr.ensure_epg_created.assert_not_called()
self.assertEqual((self.common_tenant, "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p_a_2))
self.delete_l3_policy(l3p_a_2['id'],
tenant_id=l3p_a_2['tenant_id'])
self.assertEqual((self.common_tenant, "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p_a_1))
self.delete_l3_policy(l3p_a_1['id'], tenant_id=l3p_a_1['tenant_id'])
mgr.delete_epg_for_network.assert_not_called()
l3p_a_3 = self.create_l3_policy(
name='myl3p-a-3', tenant_id='tenant_a',
external_segments={es['id']: []})['l3_policy']
if self.nat_enabled:
mgr.ensure_epg_created.assert_called_once_with(
'tenant_a', "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'], bd_owner=self.common_tenant,
transaction=mock.ANY)
self.assertEqual(('tenant_a', "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p_a_3))
else:
mgr.ensure_epg_created.assert_not_called()
self.delete_l3_policy(l3p_a_3['id'], tenant_id=l3p_a_3['tenant_id'])
mgr.ensure_epg_created.reset_mock()
l3p_b_1 = self.create_l3_policy(
name='myl3p-b-1', tenant_id='tenant_b',
external_segments={es['id']: []})['l3_policy']
if self.nat_enabled:
mgr.ensure_epg_created.assert_called_once_with(
'tenant_b', "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'], bd_owner=self.common_tenant,
transaction=mock.ANY)
self.assertEqual(('tenant_b', "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p_b_1))
else:
mgr.ensure_epg_created.assert_not_called()
class TestL3PolicyNoNat(TestL3Policy):
def setUp(self):
super(TestL3PolicyNoNat, self).setUp(nat_enabled=False)
class TestL3PolicyPreL3Out(TestL3Policy):
def setUp(self):
super(TestL3PolicyPreL3Out, self).setUp(pre_existing_l3out=True)
class TestL3PolicyNoNatPreL3Out(TestL3Policy):
def setUp(self):
super(TestL3PolicyNoNatPreL3Out, self).setUp(
nat_enabled=False, pre_existing_l3out=True)
class TestPolicyRuleSet(ApicMappingTestCase):
# TODO(ivar): verify rule intersection with hierarchical PRS happens
# on APIC
def _test_policy_rule_set_created_on_apic(self, shared=False):
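        # A new policy rule set results in a single APIC contract created
        # under the right tenant (common when shared).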
ct = self.create_policy_rule_set(name="ctr",
shared=shared)['policy_rule_set']
tenant = self.common_tenant if shared else ct['tenant_id']
mgr = self.driver.apic_manager
mgr.create_contract.assert_called_once_with(
ct['id'], owner=tenant, transaction=mock.ANY)
def test_policy_rule_set_created_on_apic(self):
self._test_policy_rule_set_created_on_apic()
def test_policy_rule_set_created_on_apic_shared(self):
self._test_policy_rule_set_created_on_apic(shared=True)
def _test_policy_rule_set_created_with_rules(self, shared=False):
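        # Directional rules map to contract subject filters: IN and OUT
        # rules get a reverse filter on the opposite direction, while BI
        # rules are enforced (with their reverse) in both directions.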
bi, in_d, out = range(3)
rules = self._create_3_direction_rules(shared=shared)
# exclude BI rule for now
ctr = self.create_policy_rule_set(
name="ctr", policy_rules=[x['id'] for x in rules[1:]])[
'policy_rule_set']
rule_owner = self.common_tenant if shared else rules[0]['tenant_id']
# Verify that the in-out rules are correctly enforced on the APIC
mgr = self.driver.apic_manager
expected_calls = [
mock.call(ctr['id'], ctr['id'], rules[in_d]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY,
unset=False, rule_owner=rule_owner),
mock.call(ctr['id'], ctr['id'],
amap.REVERSE_PREFIX + rules[out]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY,
unset=False, rule_owner=rule_owner)]
self._check_call_list(
expected_calls,
mgr.manage_contract_subject_in_filter.call_args_list)
expected_calls = [
mock.call(ctr['id'], ctr['id'], rules[out]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY,
unset=False, rule_owner=rule_owner),
mock.call(ctr['id'], ctr['id'],
amap.REVERSE_PREFIX + rules[in_d]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY,
unset=False, rule_owner=rule_owner)]
self._check_call_list(
expected_calls,
mgr.manage_contract_subject_out_filter.call_args_list)
# Create policy_rule_set with BI rule
ctr = self.create_policy_rule_set(
name="ctr", policy_rules=[rules[bi]['id']])['policy_rule_set']
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], rules[bi]['id'], owner=ctr['tenant_id'],
transaction=mock.ANY, unset=False,
rule_owner=rule_owner)
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], rules[bi]['id'], owner=ctr['tenant_id'],
transaction=mock.ANY, unset=False,
rule_owner=rule_owner)
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], amap.REVERSE_PREFIX + rules[bi]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY, unset=False,
rule_owner=rule_owner)
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], amap.REVERSE_PREFIX + rules[bi]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY, unset=False,
rule_owner=rule_owner)
def test_policy_rule_set_created_with_rules(self):
self._test_policy_rule_set_created_with_rules()
def test_policy_rule_set_created_with_rules_shared(self):
self._test_policy_rule_set_created_with_rules(shared=True)
def _test_policy_rule_set_updated_with_new_rules(self, shared=False):
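        # Swapping the rules of a policy rule set unsets the filters of
        # the old rules and sets those of the new ones on the contract.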
bi, in_d, out = range(3)
old_rules = self._create_3_direction_rules(shared=shared)
new_rules = self._create_3_direction_rules(shared=shared)
# exclude BI rule for now
ctr = self.create_policy_rule_set(
name="ctr",
policy_rules=[x['id'] for x in old_rules[1:]])['policy_rule_set']
data = {'policy_rule_set': {
'policy_rules': [x['id'] for x in new_rules[1:]]}}
rule_owner = (self.common_tenant if shared else
old_rules[in_d]['tenant_id'])
mgr = self.driver.apic_manager
mgr.manage_contract_subject_in_filter = MockCallRecorder()
mgr.manage_contract_subject_out_filter = MockCallRecorder()
self.new_update_request(
'policy_rule_sets', data, ctr['id'], self.fmt).get_response(
self.ext_api)
# Verify old IN rule unset and new IN rule set
self.assertTrue(
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], old_rules[in_d]['id'],
rule_owner=rule_owner,
owner=ctr['tenant_id'], transaction='transaction', unset=True))
self.assertTrue(
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], new_rules[in_d]['id'],
owner=ctr['tenant_id'], transaction='transaction',
unset=False, rule_owner=rule_owner))
self.assertTrue(
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], old_rules[out]['id'],
owner=ctr['tenant_id'], transaction='transaction', unset=True,
rule_owner=rule_owner))
self.assertTrue(
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], new_rules[out]['id'],
owner=ctr['tenant_id'], transaction='transaction',
unset=False, rule_owner=rule_owner))
ctr = self.create_policy_rule_set(
name="ctr",
policy_rules=[old_rules[0]['id']])['policy_rule_set']
data = {'policy_rule_set': {'policy_rules': [new_rules[0]['id']]}}
self.new_update_request(
'policy_rule_sets', data, ctr['id'], self.fmt).get_response(
self.ext_api)
        # Verify old BI rule unset and new BI rule set
self.assertTrue(
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], old_rules[bi]['id'],
owner=ctr['tenant_id'], transaction='transaction', unset=True,
rule_owner=rule_owner))
self.assertTrue(
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], old_rules[bi]['id'],
owner=ctr['tenant_id'], transaction='transaction', unset=True,
rule_owner=rule_owner))
self.assertTrue(
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], new_rules[bi]['id'],
owner=ctr['tenant_id'], transaction='transaction',
unset=False, rule_owner=rule_owner))
self.assertTrue(
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], new_rules[bi]['id'],
owner=ctr['tenant_id'], transaction='transaction',
unset=False, rule_owner=rule_owner))
def test_policy_rule_set_updated_with_new_rules(self):
self._test_policy_rule_set_updated_with_new_rules()
def test_policy_rule_set_updated_with_new_rules_shared(self):
self._test_policy_rule_set_updated_with_new_rules(shared=True)
def _create_3_direction_rules(self, shared=False):
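        # Helper: build one allow rule per direction (bi, in, out) sharing
        # the same action; the 'out' classifier uses UDP instead of TCP.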
a1 = self.create_policy_action(name='a1',
action_type='allow',
shared=shared)['policy_action']
cl_attr = {'protocol': 'tcp', 'port_range': 80}
cls = []
for direction in ['bi', 'in', 'out']:
if direction == 'out':
cl_attr['protocol'] = 'udp'
cls.append(self.create_policy_classifier(
direction=direction, shared=shared,
**cl_attr)['policy_classifier'])
rules = []
for classifier in cls:
rules.append(self.create_policy_rule(
policy_classifier_id=classifier['id'],
policy_actions=[a1['id']],
shared=shared)['policy_rule'])
return rules
class TestPolicyRule(ApicMappingTestCase):
def _test_policy_rule_created_on_apic(self, shared=False):
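        # Each policy rule maps to a tenant filter on APIC; rules with a
        # known protocol also get a reverse filter (tcpRules='est' for TCP).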
pr = self._create_simple_policy_rule('in', 'tcp', 88, shared=shared)
pr1 = self._create_simple_policy_rule('in', 'udp', 53, shared=shared)
pr2 = self._create_simple_policy_rule('in', None, 88, shared=shared)
tenant = self.common_tenant if shared else pr['tenant_id']
mgr = self.driver.apic_manager
expected_calls = [
mock.call(pr['id'], owner=tenant, entry='os-entry-0', etherT='ip',
prot='tcp', dToPort=88, dFromPort=88,
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
entry='os-entry-0', etherT='ip', prot='tcp', sToPort=88,
sFromPort=88, tcpRules='est', transaction=mock.ANY),
mock.call(pr1['id'], owner=tenant, entry='os-entry-0',
etherT='ip', prot='udp', dToPort=53, dFromPort=53,
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner=tenant,
entry='os-entry-0', etherT='ip', prot='udp', sToPort=53,
sFromPort=53, transaction=mock.ANY),
mock.call(pr2['id'], owner=tenant, entry='os-entry-0',
etherT='unspecified', dToPort=88, dFromPort=88,
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
mgr.reset_mock()
pr = self._create_simple_policy_rule('bi', None, None, shared=shared)
expected_calls = [
mock.call(pr['id'], owner=tenant, entry='os-entry-0',
etherT='unspecified', transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
def test_policy_rule_created_on_apic(self):
self._test_policy_rule_created_on_apic()
def test_policy_rule_created_on_apic_shared(self):
self._test_policy_rule_created_on_apic(shared=True)
def _test_policy_rule_deleted_on_apic(self, shared=False):
pr = self._create_simple_policy_rule(shared=shared)
pr1 = self._create_simple_policy_rule('in', 'udp', 53, shared=shared)
self.delete_policy_rule(pr['id'], expected_res_status=204)
tenant = self.common_tenant if shared else pr['tenant_id']
mgr = self.driver.apic_manager
expected_calls = [
mock.call(pr['id'], owner=tenant, transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.delete_tenant_filter.call_args_list)
mgr.delete_tenant_filter.reset_mock()
self.delete_policy_rule(pr1['id'], expected_res_status=204)
expected_calls = [
mock.call(pr1['id'], owner=tenant, transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner=tenant,
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.delete_tenant_filter.call_args_list)
def test_policy_rule_deleted_on_apic(self):
self._test_policy_rule_deleted_on_apic()
def test_policy_rule_deleted_on_apic_shared(self):
self._test_policy_rule_deleted_on_apic(shared=True)
def test_policy_classifier_updated(self):
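        # Updating a classifier rebuilds the filters of every rule that
        # uses it; contract subjects are re-wired only when the protocol
        # revertibility changes.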
pa = self.create_policy_action(
action_type='allow', is_admin_context=True,
tenant_id='admin', shared=True)['policy_action']
pc = self.create_policy_classifier(
direction='in', protocol='udp', port_range=80,
shared=True, is_admin_context=True,
tenant_id='admin')['policy_classifier']
pr1 = self.create_policy_rule(
policy_classifier_id=pc['id'], policy_actions=[pa['id']],
shared=True, is_admin_context=True,
tenant_id='admin')['policy_rule']
pr2 = self.create_policy_rule(policy_classifier_id=pc['id'],
policy_actions=[pa['id']])['policy_rule']
prs1 = self.create_policy_rule_set(
policy_rules=[pr1['id']])['policy_rule_set']
prs2 = self.create_policy_rule_set(
policy_rules=[pr2['id'], pr1['id']])['policy_rule_set']
mgr = self.driver.apic_manager
mgr.reset_mock()
        # Remove the classifier port; this should just delete and
        # re-create the filters
self.update_policy_classifier(pc['id'], port_range=None,
is_admin_context=True)
expected_calls = [
mock.call(pr1['id'], owner='common', etherT='ip', prot='udp',
entry='os-entry-0', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', etherT='ip', prot='udp',
entry='os-entry-0', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
etherT='ip', prot='udp', entry='os-entry-0',
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
etherT='ip', prot='udp', entry='os-entry-0',
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
expected_calls = [
mock.call(pr1['id'], owner='common', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.delete_tenant_filter.call_args_list)
self.assertFalse(mgr.manage_contract_subject_in_filter.called)
self.assertFalse(mgr.manage_contract_subject_out_filter.called)
mgr.reset_mock()
        # Change the classifier protocol to a non-revertible one
self.update_policy_classifier(pc['id'], protocol=None,
is_admin_context=True)
expected_calls = [
mock.call(pr1['id'], owner='common', etherT='unspecified',
entry='os-entry-0', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', etherT='unspecified',
entry='os-entry-0', transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
expected_calls = [
mock.call(pr1['id'], owner='common', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.delete_tenant_filter.call_args_list)
# Protocol went from revertible to non-revertible
self.assertTrue(mgr.manage_contract_subject_in_filter.called)
self.assertTrue(mgr.manage_contract_subject_out_filter.called)
mgr.reset_mock()
        # Change the classifier protocol to a revertible one
self.update_policy_classifier(pc['id'], protocol='tcp',
is_admin_context=True)
expected_calls = [
mock.call(pr1['id'], owner='common', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.delete_tenant_filter.call_args_list)
expected_calls = [
mock.call(pr1['id'], owner='common', etherT='ip', prot='tcp',
entry='os-entry-0', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', etherT='ip', prot='tcp',
entry='os-entry-0', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
etherT='ip', prot='tcp', tcpRules='est',
entry='os-entry-0', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
etherT='ip', prot='tcp', tcpRules='est',
entry='os-entry-0', transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
expected_calls = [
# Unset PR1 and PR2 IN
mock.call(prs1['id'], prs1['id'], pr1['id'], owner='test-tenant',
transaction=mock.ANY, unset=True, rule_owner='common'),
mock.call(prs2['id'], prs2['id'], pr1['id'], owner='test-tenant',
transaction=mock.ANY, unset=True, rule_owner='common'),
mock.call(prs2['id'], prs2['id'], pr2['id'], owner='test-tenant',
transaction=mock.ANY, unset=True,
rule_owner='test-tenant'),
# SET PR1 and PR2 IN
mock.call(prs1['id'], prs1['id'], pr1['id'], owner='test-tenant',
transaction=mock.ANY, unset=False, rule_owner='common'),
mock.call(prs2['id'], prs2['id'], pr1['id'], owner='test-tenant',
transaction=mock.ANY, unset=False, rule_owner='common'),
mock.call(prs2['id'], prs2['id'], pr2['id'], owner='test-tenant',
transaction=mock.ANY, unset=False,
rule_owner='test-tenant')
]
self._check_call_list(
expected_calls,
mgr.manage_contract_subject_in_filter.call_args_list)
# SET Reverse PR1 and PR2 OUT
expected_calls = [
mock.call(prs1['id'], prs1['id'], amap.REVERSE_PREFIX + pr1['id'],
owner='test-tenant', transaction=mock.ANY, unset=False,
rule_owner='common'),
mock.call(prs2['id'], prs2['id'], amap.REVERSE_PREFIX + pr1['id'],
owner='test-tenant', transaction=mock.ANY, unset=False,
rule_owner='common'),
mock.call(prs2['id'], prs2['id'], amap.REVERSE_PREFIX + pr2['id'],
owner='test-tenant', transaction=mock.ANY, unset=False,
rule_owner='test-tenant')
]
self._check_call_list(
expected_calls,
mgr.manage_contract_subject_out_filter.call_args_list)
def test_icmp_rule_created_on_apic(self):
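        # ICMP reverse filters need one entry per reply type (echo-rep,
        # dst-unreach, src-quench, time-exceeded), each with a unique name.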
pr = self._create_simple_policy_rule('in', 'icmp', None)
tenant = pr['tenant_id']
mgr = self.driver.apic_manager
expected_calls = [
mock.call(pr['id'], owner=tenant, entry='os-entry-0', etherT='ip',
prot='icmp', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
entry=mock.ANY, etherT='ip', icmpv4T='echo-rep',
prot='icmp', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
entry=mock.ANY, etherT='ip', icmpv4T='dst-unreach',
prot='icmp', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
entry=mock.ANY, etherT='ip', icmpv4T='src-quench',
prot='icmp', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
entry=mock.ANY, etherT='ip', icmpv4T='time-exceeded',
prot='icmp', transaction=mock.ANY)]
        # Verify that the entry name is different for each reverse filter
found = set()
for call in mgr.create_tenant_filter.call_args_list:
# Only for reverse filters
if call[0][0].startswith(amap.REVERSE_PREFIX):
self.assertFalse(call[1]['entry'] in found)
found.add(call[1]['entry'])
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
class TestExternalSegment(ApicMappingTestCase):
def test_pat_rejected(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
# Verify Rejected on create
res = self.create_external_segment(
name='supported', port_address_translation=True,
expected_res_status=400)
self.assertEqual('PATNotSupportedByApicDriver',
res['NeutronError']['type'])
# Verify Rejected on Update
es = self.create_external_segment(
name='supported', expected_res_status=201,
port_address_translation=False)['external_segment']
res = self.update_external_segment(
es['id'], expected_res_status=400, port_address_translation=True)
self.assertEqual('PATNotSupportedByApicDriver',
res['NeutronError']['type'])
def test_edge_nat_invalid_vlan_range_rejected(self):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat=True)
self.driver.l3out_vlan_alloc.l3out_vlan_ranges = {}
res = self.create_external_segment(
name='supported', expected_res_status=400)
self.assertEqual('EdgeNatBadVlanRange', res['NeutronError']['type'])
ext_info = self.driver.apic_manager.ext_net_dict.get('supported')
del ext_info['vlan_range']
res = self.create_external_segment(
name='supported', expected_res_status=400)
self.assertEqual('EdgeNatVlanRangeNotFound',
res['NeutronError']['type'])
def _test_create_delete(self, shared=False, is_edge_nat=False):
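        # Creating a supported ES sets up the NAT plumbing on APIC (NAT
        # VRF, NAT BD/EPG unless edge NAT, an allow-all contract);
        # deleting the ES tears it all down again.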
mgr = self.driver.apic_manager
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
mgr.ext_net_dict['supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
cidr='192.168.0.2/24',
expected_res_status=201, shared=shared)['external_segment']
self.create_external_segment(name='unsupport', expected_res_status=201,
shared=shared)
self.assertEqual('192.168.0.2/24', es['cidr'])
self.assertIsNotNone(es['subnet_id'])
subnet = self._get_object('subnets', es['subnet_id'],
self.api)['subnet']
self.assertEqual('169.254.0.0/16', subnet['cidr'])
owner = es['tenant_id'] if not shared else self.common_tenant
prs = "NAT-allow-%s" % es['id']
if self.nat_enabled:
ctx = "NAT-vrf-%s" % es['id']
ctx_owner = owner
contract_owner = owner
if self.pre_l3out:
ctx = APIC_PRE_VRF
ctx_owner = APIC_PRE_VRF_TENANT
contract_owner = APIC_PRE_L3OUT_TENANT
self.assertFalse(mgr.ensure_context_enforced.called)
else:
mgr.ensure_context_enforced.assert_called_with(
owner=owner, ctx_id=ctx,
transaction=mock.ANY)
if not is_edge_nat:
                mgr.ensure_bd_created_on_apic.assert_called_with(
                    owner, "NAT-bd-%s" % es['id'], ctx_owner=ctx_owner,
                    ctx_name=ctx, transaction=mock.ANY)
mgr.ensure_epg_created.assert_called_with(
owner, "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'],
transaction=mock.ANY)
else:
self.assertFalse(mgr.ensure_bd_created_on_apic.called)
self.assertFalse(mgr.ensure_epg_created.called)
mgr.create_tenant_filter.assert_called_with(
prs, owner=contract_owner,
entry="allow-all", transaction=mock.ANY)
mgr.manage_contract_subject_bi_filter.assert_called_with(
prs, prs, prs, owner=contract_owner, transaction=mock.ANY)
if not is_edge_nat:
expected_calls = [
mock.call(owner, "NAT-epg-%s" % es['id'], prs,
transaction=mock.ANY),
mock.call(owner, "NAT-epg-%s" % es['id'], prs,
provider=True, transaction=mock.ANY)]
self._check_call_list(expected_calls,
mgr.set_contract_for_epg.call_args_list)
else:
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
self.assertFalse(mgr.set_contract_for_epg.called)
ctx = context.get_admin_context()
internal_subnets = self._db_plugin.get_subnets(
ctx, filters={'name': [amap.HOST_SNAT_POOL]})
self.assertEqual(1, len(internal_subnets))
else:
self.assertFalse(mgr.ensure_bd_created_on_apic.called)
self.assertFalse(mgr.ensure_epg_created.called)
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
self.assertFalse(mgr.create_tenant_filter.called)
self.assertFalse(mgr.manage_contract_subject_bi_filter.called)
self.assertFalse(mgr.set_contract_for_epg.called)
subnet_id = es['subnet_id']
self.delete_external_segment(es['id'],
expected_res_status=webob.exc.HTTPNoContent.code)
self._get_object('subnets', subnet_id, self.api,
expected_res_status=404)
if self.nat_enabled:
ctx = "NAT-vrf-%s" % es['id']
ctx_owner = owner
contract_owner = owner
if self.pre_l3out:
ctx = APIC_PRE_VRF
ctx_owner = APIC_PRE_VRF_TENANT
contract_owner = APIC_PRE_L3OUT_TENANT
self.assertFalse(mgr.ensure_context_enforced.called)
else:
mgr.ensure_context_deleted.assert_called_with(
ctx_owner, ctx, transaction=mock.ANY)
if not is_edge_nat:
mgr.delete_bd_on_apic.assert_called_with(
owner, "NAT-bd-%s" % es['id'], transaction=mock.ANY)
mgr.delete_epg_for_network.assert_called_with(
owner, "NAT-epg-%s" % es['id'], transaction=mock.ANY)
else:
self.assertFalse(mgr.delete_bd_on_apic.called)
self.assertFalse(mgr.delete_epg_for_network.called)
mgr.delete_contract.assert_called_with(
prs, owner=contract_owner, transaction=mock.ANY)
mgr.delete_tenant_filter.assert_called_with(
prs, owner=contract_owner, transaction=mock.ANY)
else:
self.assertFalse(mgr.delete_bd_on_apic.called)
self.assertFalse(mgr.delete_epg_for_network.called)
self.assertFalse(mgr.delete_contract.called)
self.assertFalse(mgr.delete_tenant_filter.called)
def test_create_delete_unshared(self):
self._test_create_delete(False)
def test_create_delete_shared(self):
self._test_create_delete(True)
def test_create_delete_unshared_edge_nat(self):
self._test_create_delete(False, is_edge_nat=True)
def test_create_delete_shared_edge_nat(self):
self._test_create_delete(True, is_edge_nat=True)
def test_update_unsupported_noop(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='unsupport', cidr='192.168.0.0/24',
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}],
expected_res_status=201)['external_segment']
self.update_external_segment(es['id'], expected_res_status=200,
external_routes=[])
mgr = self.driver.apic_manager
self.assertFalse(mgr.ensure_static_route_deleted.called)
self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
self.assertFalse(mgr.ensure_static_route_created.called)
self.assertFalse(mgr.ensure_external_epg_created.called)
self.assertFalse(mgr.ensure_next_hop_deleted.called)
def _test_route_update_remove(self, shared_es, is_edge_nat=False):
        # Verify that removed routes are correctly cleaned up on APIC
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}],
expected_res_status=201)['external_segment']
# create L3-policies
if self.pre_l3out and not self.nat_enabled:
tenants = [es['tenant_id']]
else:
tenants = (['tenant_a', 'tenant_b', 'tenant_c']
if self.nat_enabled and shared_es
else [es['tenant_id']])
l3p_list = []
        for x in range(len(tenants)):
l3p = self.create_l3_policy(
shared=False,
tenant_id=tenants[x],
external_segments={es['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
# Attach external policy
f = self.create_external_policy
eps = [f(external_segments=[es['id']],
tenant_id=tenants[x],
expected_res_status=201)['external_policy']
               for x in range(len(tenants))]
mgr = self.driver.apic_manager
owner = es['tenant_id'] if not shared_es else self.common_tenant
mgr.ensure_external_epg_created.reset_mock()
mgr.ensure_static_route_created.reset_mock()
# Remove route completely
self.update_external_segment(es['id'], expected_res_status=200,
external_routes=[
{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'}])
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
mgr = self.driver.apic_manager
if not self.pre_l3out:
expected_delete_calls = []
expected_delete_calls.append(
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
'128.0.0.0/16', owner=owner, transaction=mock.ANY))
if self.nat_enabled and is_edge_nat:
for x in range(len(tenants)):
l3p = l3p_list[x]
l3out = sub_str % (l3p['id'], es['id'])
tenant = tenants[x]
expected_delete_calls.append(
mock.call(l3out, mocked.APIC_EXT_SWITCH,
'128.0.0.0/16', owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_delete_calls,
mgr.ensure_static_route_deleted.call_args_list)
else:
self.assertFalse(mgr.ensure_static_route_deleted.called)
expected_delete_calls = []
for x in range(len(tenants)):
ep = eps[x]
l3p = l3p_list[x]
l3out = es['name' if self.pre_l3out else 'id']
ext_epg = ep['id']
tenant = APIC_PRE_L3OUT_TENANT if self.pre_l3out else owner
if self.nat_enabled:
l3out = sub_str % (l3p['id'], es['id'])
ext_epg = sub_str % (l3p['id'], ext_epg)
tenant = tenants[x]
expected_delete_calls.append(
mock.call(l3out, subnets=['128.0.0.0/16'],
external_epg=ext_epg, owner=tenant,
transaction=mock.ANY))
self._check_call_list(
expected_delete_calls,
mgr.ensure_external_epg_routes_deleted.call_args_list)
self.assertFalse(mgr.ensure_static_route_created.called)
self.assertFalse(mgr.ensure_external_epg_created.called)
self.assertFalse(mgr.ensure_next_hop_deleted.called)
# Remove nexthop only
mgr.ensure_static_route_deleted.reset_mock()
mgr.ensure_external_epg_routes_deleted.reset_mock()
self.update_external_segment(es['id'], expected_res_status=200,
external_routes=[
{'destination': '0.0.0.0/0',
'nexthop': None}])
if not self.pre_l3out:
expected_delete_calls = []
expected_create_calls = []
expected_delete_calls.append(
mock.call(es['id'], mocked.APIC_EXT_SWITCH, '0.0.0.0/0',
'192.168.0.254', owner=owner, transaction=mock.ANY))
            # Since the new nexthop is None, the default one is used
expected_create_calls.append(
mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.1',
subnet='0.0.0.0/0', owner=owner, transaction=mock.ANY))
if self.nat_enabled and is_edge_nat:
for x in range(len(tenants)):
l3p = l3p_list[x]
l3out = sub_str % (l3p['id'], es['id'])
tenant = tenants[x]
expected_delete_calls.append(
mock.call(l3out, mocked.APIC_EXT_SWITCH, '0.0.0.0/0',
'192.168.0.254', owner=tenant,
transaction=mock.ANY))
expected_create_calls.append(
mock.call(l3out, mocked.APIC_EXT_SWITCH, '192.168.0.1',
subnet='0.0.0.0/0', owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_delete_calls,
mgr.ensure_next_hop_deleted.call_args_list)
self._check_call_list(expected_create_calls,
mgr.ensure_static_route_created.call_args_list)
else:
self.assertFalse(mgr.ensure_static_route_created.called)
self.assertFalse(mgr.ensure_next_hop_deleted.called)
expected_delete_calls = []
for x in range(len(tenants)):
ep = eps[x]
l3p = l3p_list[x]
l3out = es['name' if self.pre_l3out else 'id']
ext_epg = ep['id']
tenant = APIC_PRE_L3OUT_TENANT if self.pre_l3out else owner
if self.nat_enabled:
l3out = sub_str % (l3p['id'], es['id'])
ext_epg = sub_str % (l3p['id'], ext_epg)
tenant = tenants[x]
expected_delete_calls.append(
mock.call(l3out, subnet='0.0.0.0/0',
external_epg=ext_epg, owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_delete_calls,
mgr.ensure_external_epg_created.call_args_list)
self.assertFalse(mgr.ensure_static_route_deleted.called)
self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
    # Although the naming convention used here has been chosen poorly,
    # I'm separating the tests in order to get the mocks reset.
def test_route_update_remove_1(self):
self._test_route_update_remove(shared_es=True)
def test_route_update_remove_2(self):
self._test_route_update_remove(shared_es=False)
def test_route_update_remove_edge_nat_mode_1(self):
self._test_route_update_remove(shared_es=True, is_edge_nat=True)
def test_route_update_remove_edge_nat_mode_2(self):
self._test_route_update_remove(shared_es=False, is_edge_nat=True)
def _test_route_update_add(self, shared_es, is_edge_nat=False):
        # Verify that added routes are correctly propagated to APIC
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
external_routes=[], expected_res_status=201)['external_segment']
if self.pre_l3out and not self.nat_enabled:
tenants = [es['tenant_id']]
else:
tenants = (['tenant_a', 'tenant_b', 'tenant_c']
if self.nat_enabled and shared_es
else [es['tenant_id']])
# create L3-policies
l3p_list = []
        for x in range(len(tenants)):
l3p = self.create_l3_policy(
shared=False,
tenant_id=tenants[x],
external_segments={es['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
# Attach external policies
f = self.create_external_policy
eps = [f(external_segments=[es['id']],
tenant_id=tenants[x],
expected_res_status=201)['external_policy']
               for x in range(len(tenants))]
mgr = self.driver.apic_manager
mgr.ensure_static_route_created.reset_mock()
mgr.ensure_external_epg_created.reset_mock()
owner = es['tenant_id'] if not shared_es else self.common_tenant
self.update_external_segment(es['id'], expected_res_status=200,
external_routes=[
{'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
if not self.pre_l3out:
expected_create_calls = []
expected_create_calls.append(
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
'192.168.0.254', subnet='128.0.0.0/16',
owner=owner, transaction=mock.ANY))
if self.nat_enabled and is_edge_nat:
for x in range(len(tenants)):
l3p = l3p_list[x]
l3out = sub_str % (l3p['id'], es['id'])
tenant = tenants[x]
expected_create_calls.append(
mock.call(l3out, mocked.APIC_EXT_SWITCH,
'192.168.0.254', subnet='128.0.0.0/16',
owner=tenant, transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_static_route_created.call_args_list)
else:
self.assertFalse(mgr.ensure_static_route_created.called)
expected_create_calls = []
for x in range(len(tenants)):
ep = eps[x]
l3p = l3p_list[x]
l3out = es['name' if self.pre_l3out else 'id']
ext_epg = ep['id']
tenant = APIC_PRE_L3OUT_TENANT if self.pre_l3out else owner
if self.nat_enabled:
l3out = sub_str % (l3p['id'], es['id'])
ext_epg = sub_str % (l3p['id'], ext_epg)
tenant = tenants[x]
expected_create_calls.append(
mock.call(l3out, subnet='128.0.0.0/16',
external_epg=ext_epg, owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_created.call_args_list)
self.assertFalse(mgr.ensure_static_route_deleted.called)
self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
self.assertFalse(mgr.ensure_next_hop_deleted.called)
mgr.ensure_static_route_created.reset_mock()
mgr.ensure_external_epg_created.reset_mock()
        # Verify a route added with the default gateway
self.update_external_segment(es['id'], expected_res_status=200,
external_routes=[
{'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'},
{'destination': '0.0.0.0/0',
'nexthop': None}])
if not self.pre_l3out:
expected_create_calls = []
expected_create_calls.append(
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
'192.168.0.1', subnet='0.0.0.0/0',
owner=owner, transaction=mock.ANY))
if self.nat_enabled and is_edge_nat:
for x in range(len(tenants)):
l3p = l3p_list[x]
l3out = sub_str % (l3p['id'], es['id'])
tenant = tenants[x]
expected_create_calls.append(
mock.call(l3out, mocked.APIC_EXT_SWITCH, '192.168.0.1',
subnet='0.0.0.0/0', owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_static_route_created.call_args_list)
else:
self.assertFalse(mgr.ensure_static_route_created.called)
expected_create_calls = []
for x in range(len(tenants)):
ep = eps[x]
l3p = l3p_list[x]
l3out = es['name' if self.pre_l3out else 'id']
ext_epg = ep['id']
tenant = APIC_PRE_L3OUT_TENANT if self.pre_l3out else owner
if self.nat_enabled:
l3out = sub_str % (l3p['id'], es['id'])
ext_epg = sub_str % (l3p['id'], ext_epg)
tenant = tenants[x]
expected_create_calls.append(
mock.call(l3out, subnet='0.0.0.0/0',
external_epg=ext_epg, owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_created.call_args_list)
self.assertFalse(mgr.ensure_static_route_deleted.called)
self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
self.assertFalse(mgr.ensure_next_hop_deleted.called)
    # Although the naming convention used here has been chosen poorly,
    # I'm separating the tests in order to get the mocks reset.
def test_route_update_add_1(self):
self._test_route_update_add(shared_es=True)
def test_route_update_add_2(self):
self._test_route_update_add(shared_es=False)
def test_route_update_add_edge_nat_mode_1(self):
self._test_route_update_add(shared_es=True, is_edge_nat=True)
def test_route_update_add_edge_nat_mode_2(self):
self._test_route_update_add(shared_es=False, is_edge_nat=True)
def test_es_create_no_cidr_with_routes(self):
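        # An ES created without an explicit CIDR still accepts external
        # routes.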
self._mock_external_dict([('supported', '192.168.0.2/24')])
nh = '172.16.0.1' if self.pre_l3out else '192.168.0.254'
self.create_external_segment(
name='supported',
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': nh}],
expected_res_status=201)
def test_implicit_es_router_gw_ip(self):
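        # An L3P with no explicit ES is implicitly attached to the
        # 'default' ES and gets its address auto-allocated from the
        # implicit subnet.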
self._mock_external_dict([('default', '192.168.0.2/24')])
es = self.create_external_segment(
name='default',
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': None}])['external_segment']
l3p = self.create_l3_policy()['l3_policy']
self.assertEqual(es['id'],
l3p['external_segments'].keys()[0])
self.assertEqual('169.254.0.2',
l3p['external_segments'][es['id']][0])
def _do_test_plug_l3p_to_es_with_multi_ep(self):
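        # With multiple external policies on shared segments, plugging an
        # L3P creates per-L3P shadow external EPGs and wires the NAT allow
        # contract to the right (pre-existing or default) external EPG.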
tenants = (['tenant_a', 'tenant_b', 'tenant_c']
if self.nat_enabled else ['tenant_a'])
self._mock_external_dict([('supported', '192.168.0.2/24')])
ext_routes = ['128.0.0.0/24', '128.0.1.0/24']
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=True,
expected_res_status=201,
external_routes=[{
'destination': ext_routes[x],
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(2)]
ep_list = []
for x in range(len(tenants)):
ep = self.create_external_policy(
                name=(APIC_EXTERNAL_EPG if x < 2 else 'other-ext-epg'),
external_segments=[e['id'] for e in es_list],
tenant_id=tenants[x],
expected_res_status=201)['external_policy']
ep_list.append(ep)
mgr = self.driver.apic_manager
mgr.ensure_external_epg_created.reset_mock()
mgr.set_contract_for_external_epg.reset_mock()
ep = ep_list[0]
l3p = self.create_l3_policy(
shared=False,
tenant_id=tenants[0],
external_segments={x['id']: [] for x in es_list},
expected_res_status=201)['l3_policy']
expected_create_calls = []
expected_assoc_calls = []
expected_contract_calls = []
if self.nat_enabled:
for es in es_list:
if not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], subnet='0.0.0.0/0',
external_epg='default-%s' % es['id'],
owner=self.common_tenant,
transaction=mock.ANY))
expected_create_calls.append(
mock.call("Shd-%s-%s" % (l3p['id'], es['id']),
subnet=es['external_routes'][0]['destination'],
external_epg="Shd-%s-%s" % (l3p['id'], ep['id']),
owner=l3p['tenant_id'],
transaction=mock.ANY))
expected_assoc_calls.append(
mock.call(l3p['tenant_id'],
"Shd-%s-%s" % (l3p['id'], es['id']),
"Shd-%s-%s" % (l3p['id'], ep['id']),
"NAT-epg-%s" % es['id'],
target_owner=(l3p['tenant_id']
if self.driver.per_tenant_nat_epg
else self.common_tenant),
transaction=mock.ANY))
l3out = es['name' if self.pre_l3out else 'id']
l3out_owner = (APIC_PRE_L3OUT_TENANT
if self.pre_l3out else self.common_tenant)
nat_contract = "NAT-allow-%s" % es['id']
ext_epg = (ep['name']
if self.pre_l3out else ('default-%s' % es['id']))
expected_contract_calls.append(
mock.call(l3out, nat_contract,
external_epg=ext_epg,
owner=l3out_owner,
provided=True, transaction=mock.ANY))
expected_contract_calls.append(
mock.call(l3out, nat_contract,
external_epg=ext_epg,
owner=l3out_owner,
provided=False, transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_created.call_args_list)
self._check_call_list(expected_assoc_calls,
mgr.associate_external_epg_to_nat_epg.call_args_list)
self._check_call_list(expected_contract_calls,
mgr.set_contract_for_external_epg.call_args_list)
def test_plug_l3p_to_es_with_multi_ep(self):
self._do_test_plug_l3p_to_es_with_multi_ep()
def test_plug_l3p_to_es_with_multi_ep_ptne(self):
self.driver.per_tenant_nat_epg = True
self._do_test_plug_l3p_to_es_with_multi_ep()
class TestExternalSegmentNoNat(TestExternalSegment):
def setUp(self):
super(TestExternalSegmentNoNat, self).setUp(nat_enabled=False)
class TestExternalSegmentPreL3Out(TestExternalSegment):
def setUp(self, **kwargs):
kwargs['pre_existing_l3out'] = True
super(TestExternalSegmentPreL3Out, self).setUp(**kwargs)
def test_query_l3out_info(self):
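        # Exercise the real _query_l3out_info: VRF name/tenant are parsed
        # from the l3extRsEctx tDn, and both the given tenant and 'common'
        # are searched before giving up.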
self.driver._query_l3out_info = self.orig_query_l3out_info
ctx1 = [{
'l3extRsEctx': {'attributes': {'tDn': 'uni/tn-foo/ctx-foobar'}}}]
mgr = self.driver.apic_manager
mgr.apic.l3extOut.get_subtree.return_value = ctx1
info = self.driver._query_l3out_info('l3out', 'bar_tenant')
self.assertEqual('bar_tenant', info['l3out_tenant'])
self.assertEqual('foobar', info['vrf_name'])
self.assertEqual('foo', info['vrf_tenant'])
mgr.apic.l3extOut.get_subtree.reset_mock()
mgr.apic.l3extOut.get_subtree.return_value = []
info = self.driver._query_l3out_info('l3out', 'bar_tenant')
self.assertEqual(None, info)
expected_calls = [
mock.call('bar_tenant', 'l3out'),
mock.call('common', 'l3out')]
self._check_call_list(
expected_calls, mgr.apic.l3extOut.get_subtree.call_args_list)
def test_l3out_tenant(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
self.driver._query_l3out_info.return_value['l3out_tenant'] = (
apic_mapper.ApicName('some_other_tenant'))
res = self.create_external_segment(name='supported',
tenant_id='a_tenant', cidr='192.168.0.2/24',
expected_res_status=400)
self.assertEqual('PreExistingL3OutInIncorrectTenant',
res['NeutronError']['type'])
self.create_external_segment(name='supported',
tenant_id='some_other_tenant', cidr='192.168.0.2/24',
expected_res_status=201)
def test_edge_nat_wrong_L3out_IF_type_rejected(self):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat=True)
self.driver._query_l3out_info.return_value['l3out'] = (
[{u'l3extLNodeP':
{u'attributes':
{u'dn': u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
u'children': [{u'l3extLIfP':
{u'children': [{u'l3extRsPathL3OutAtt':
{u'attributes':
{u'ifInstT': u'l3-port'
}}}]}}]}}])
res = self.create_external_segment(
name='supported', expected_res_status=400)
self.assertEqual('EdgeNatWrongL3OutIFType',
res['NeutronError']['type'])
def test_edge_nat_wrong_L3out_OSPF_Auth_type_rejected(self):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat=True)
self.driver._query_l3out_info.return_value['l3out'] = (
[{u'l3extLNodeP':
{u'attributes':
{u'dn': u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
u'children': [{u'l3extLIfP':
{u'children': [{u'ospfIfP':
{u'attributes':
{u'authType': u'simple'
}}}]}}]}}])
res = self.create_external_segment(
name='supported', expected_res_status=400)
self.assertEqual('EdgeNatWrongL3OutAuthTypeForOSPF',
res['NeutronError']['type'])
def test_edge_nat_wrong_L3out_BGP_Auth_type_rejected(self):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat=True)
self.driver._query_l3out_info.return_value['l3out'] = (
[{u'l3extLNodeP':
{u'attributes':
{u'dn': u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
u'children': [{u'l3extLIfP':
{u'children': [{u'l3extRsNodeL3OutAtt':
{u'attributes':
{u'type': u'sha1'}}},
{u'bfdIfP':
{u'attributes':
{u'type': u'sha1'}}},
{u'l3extRsNodeL3OutAtt':
{u'attributes':
{u'type': u'sha1'}}}]}}]}}])
res = self.create_external_segment(
name='supported', expected_res_status=400)
self.assertEqual('EdgeNatWrongL3OutAuthTypeForBGP',
res['NeutronError']['type'])
        # Try again with valid input
self.driver._query_l3out_info.return_value['l3out'] = (
[{u'l3extLNodeP':
{u'attributes':
{u'dn': u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
u'children': [{u'l3extLIfP':
{u'children': [{u'l3extRsNodeL3OutAtt':
{u'attributes':
{u'type': u'sha1'}}},
{u'bfdIfP':
{u'attributes':
{u'type': u'none'}}},
{u'l3extRsNodeL3OutAtt':
{u'attributes':
{u'type': u'sha1'}}}]}}]}}])
res = self.create_external_segment(
name='supported', expected_res_status=201)
class TestExternalSegmentNoNatPreL3Out(TestExternalSegmentPreL3Out):
def setUp(self):
super(TestExternalSegmentNoNatPreL3Out, self).setUp(
nat_enabled=False)
class TestExternalPolicy(ApicMappingTestCase):
def test_creation_noop(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
external_routes=[], expected_res_status=201)['external_segment']
self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[es['id']], expected_res_status=201)
        # Verify the external EPG is always created with the default route
mgr = self.driver.apic_manager
if self.nat_enabled and not self.pre_l3out:
mgr.ensure_external_epg_created.assert_called_once_with(
es['id'], subnet='0.0.0.0/0',
external_epg=("default-%s" % es['id']), owner=es['tenant_id'],
transaction=mock.ANY)
else:
self.assertFalse(mgr.ensure_external_epg_created.called)
mgr.ensure_external_epg_created.reset_mock()
es = self.create_external_segment(
name='unsupported', cidr='192.168.0.0/24', expected_res_status=201,
external_routes=[{'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
self.create_external_policy(
external_segments=[es['id']], expected_res_status=201)
# Verify noop on unsupported
self.assertFalse(mgr.ensure_external_epg_created.called)
def test_create_shared(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
external_routes=[], shared=True,
expected_res_status=201)['external_segment']
res = self.create_external_policy(
external_segments=[es['id']], shared=True,
expected_res_status=400)
self.assertEqual('SharedExternalPolicyUnsupported',
res['NeutronError']['type'])
def test_update_shared(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
external_routes=[], shared=True,
expected_res_status=201)['external_segment']
ep = self.create_external_policy(
external_segments=[es['id']],
expected_res_status=201)['external_policy']
res = self.update_external_policy(
ep['id'], shared=True, expected_res_status=400)
self.assertEqual('SharedExternalPolicyUnsupported',
res['NeutronError']['type'])
def _test_creation_no_prs(self, shared_es, is_edge_nat=False):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
expected_res_status=201,
external_routes=[{
'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(3)]
l3p_list = []
for x in xrange(len(es_list)):
l3p = self.create_l3_policy(
shared=False,
tenant_id=shared_es and 'another' or es_list[x]['tenant_id'],
external_segments={es_list[x]['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
ep = self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[x['id'] for x in es_list],
tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
expected_res_status=201)['external_policy']
mgr = self.driver.apic_manager
owner = (es_list[0]['tenant_id'] if not shared_es
else self.common_tenant)
l3p_owner = l3p_list[0]['tenant_id']
expected_create_calls = []
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
if self.nat_enabled:
if not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], subnet='0.0.0.0/0',
external_epg="default-%s" % es['id'], owner=owner,
transaction=mock.ANY))
expected_create_calls.append(
mock.call(sub_str % (l3p['id'], es['id']),
subnet='128.0.0.0/16',
external_epg=(sub_str % (l3p['id'], ep['id'])),
owner=l3p_owner,
transaction=mock.ANY))
elif not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], subnet='128.0.0.0/16',
external_epg=ep['id'], owner=owner,
transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_created.call_args_list)
if self.nat_enabled:
expected_contract_calls = []
ext_epg_tenant = (APIC_PRE_L3OUT_TENANT if self.pre_l3out
else owner)
for x in range(len(es_list)):
es = es_list[x]
ext_epg = (APIC_EXTERNAL_EPG if self.pre_l3out
else "default-%s" % es['id'])
es_name = es['name' if self.pre_l3out else 'id']
nat_contract = "NAT-allow-%s" % es['id']
expected_contract_calls.extend([
mock.call(es_name, nat_contract,
external_epg=ext_epg, owner=ext_epg_tenant,
provided=True, transaction=mock.ANY),
mock.call(es_name, nat_contract,
external_epg=ext_epg, owner=ext_epg_tenant,
provided=False, transaction=mock.ANY)])
self._check_call_list(expected_contract_calls,
mgr.set_contract_for_external_epg.call_args_list)
else:
self.assertFalse(mgr.set_contract_for_external_epg.called)
# Although the naming convention used here has been chosen poorly,
# I'm separating the tests in order to get the mock re-set.
def test_creation_no_prs_1(self):
self._test_creation_no_prs(shared_es=True)
def test_creation_no_prs_2(self):
self._test_creation_no_prs(shared_es=False)
def test_creation_no_prs_edge_nat_mode_1(self):
self._test_creation_no_prs(shared_es=True, is_edge_nat=True)
def test_creation_no_prs_edge_nat_mode_2(self):
self._test_creation_no_prs(shared_es=False, is_edge_nat=True)
def _test_update_no_prs(self, shared_es, is_edge_nat=False):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
expected_res_status=201,
external_routes=[{
'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(3)]
l3p_list = []
for x in xrange(len(es_list)):
l3p = self.create_l3_policy(
shared=False,
tenant_id=shared_es and 'another' or es_list[x]['tenant_id'],
external_segments={es_list[x]['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
ep = self.create_external_policy(
name=APIC_EXTERNAL_EPG,
tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
expected_res_status=201)['external_policy']
ep = self.update_external_policy(
ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
external_segments=[x['id'] for x in es_list])['external_policy']
mgr = self.driver.apic_manager
owner = (es_list[0]['tenant_id'] if not shared_es
else self.common_tenant)
l3p_owner = l3p_list[0]['tenant_id']
expected_create_calls = []
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
if self.nat_enabled:
if not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], subnet='0.0.0.0/0',
external_epg="default-%s" % es['id'],
owner=owner, transaction=mock.ANY))
expected_create_calls.append(
mock.call(sub_str % (l3p['id'], es['id']),
subnet='128.0.0.0/16',
external_epg=sub_str % (l3p['id'], ep['id']),
owner=l3p_owner, transaction=mock.ANY))
elif not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], subnet='128.0.0.0/16',
external_epg=ep['id'],
owner=owner, transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_created.call_args_list)
if self.nat_enabled:
expected_contract_calls = []
ext_epg_tenant = (APIC_PRE_L3OUT_TENANT if self.pre_l3out
else owner)
for x in range(len(es_list)):
es = es_list[x]
ext_epg = (APIC_EXTERNAL_EPG if self.pre_l3out
else "default-%s" % es['id'])
es_name = es['name' if self.pre_l3out else 'id']
nat_contract = "NAT-allow-%s" % es['id']
expected_contract_calls.extend([
mock.call(es_name, nat_contract,
external_epg=ext_epg, owner=ext_epg_tenant,
provided=True, transaction=mock.ANY),
mock.call(es_name, nat_contract,
external_epg=ext_epg, owner=ext_epg_tenant,
provided=False, transaction=mock.ANY)])
self._check_call_list(expected_contract_calls,
mgr.set_contract_for_external_epg.call_args_list)
else:
self.assertFalse(mgr.set_contract_for_external_epg.called)
ep = self.update_external_policy(
ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
external_segments=[])['external_policy']
mgr = self.driver.apic_manager
expected_create_calls = []
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
if self.nat_enabled:
if not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], owner=owner,
external_epg="default-%s" % es['id']))
expected_create_calls.append(
mock.call(sub_str % (l3p['id'], es['id']),
owner=l3p_owner,
external_epg=sub_str % (l3p['id'], ep['id'])))
elif not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], owner=owner, external_epg=ep['id']))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_deleted.call_args_list)
if self.nat_enabled and self.pre_l3out:
expected_contract_calls = []
ext_epg_tenant = APIC_PRE_L3OUT_TENANT
for x in range(len(es_list)):
es = es_list[x]
nat_contract = "NAT-allow-%s" % es['id']
expected_contract_calls.extend([
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG, owner=ext_epg_tenant,
provided=True, transaction=mock.ANY),
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG, owner=ext_epg_tenant,
provided=False, transaction=mock.ANY)])
self._check_call_list(expected_contract_calls,
mgr.unset_contract_for_external_epg.call_args_list)
else:
self.assertFalse(mgr.unset_contract_for_external_epg.called)
# Although the naming convention used here has been chosen poorly,
# I'm separating the tests in order to get the mock re-set.
def test_update_no_prs_1(self):
self._test_update_no_prs(shared_es=True)
def test_update_no_prs_2(self):
self._test_update_no_prs(shared_es=False)
def test_update_no_prs_edge_nat_mode_1(self):
self._test_update_no_prs(shared_es=True, is_edge_nat=True)
def test_update_no_prs_edge_nat_mode_2(self):
self._test_update_no_prs(shared_es=False, is_edge_nat=True)
def _test_create_with_prs(self, shared_es, shared_prs, is_edge_nat=False):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
expected_res_status=201,
external_routes=[{
'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(3)]
l3p_list = []
for x in xrange(len(es_list)):
l3p = self.create_l3_policy(
shared=False,
tenant_id=shared_es and 'another' or es_list[x]['tenant_id'],
external_segments={es_list[x]['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
prov = self._create_policy_rule_set_on_shared(
shared=shared_prs,
tenant_id=es_list[0]['tenant_id'] if not (
shared_es | shared_prs) else 'another')
cons = self._create_policy_rule_set_on_shared(
shared=shared_prs,
tenant_id=es_list[0]['tenant_id'] if not (
shared_es | shared_prs) else 'another')
ep = self.create_external_policy(
name=APIC_EXTERNAL_EPG,
provided_policy_rule_sets={prov['id']: ''},
consumed_policy_rule_sets={cons['id']: ''},
tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
external_segments=[x['id'] for x in es_list],
expected_res_status=201)['external_policy']
mgr = self.driver.apic_manager
owner = (es_list[0]['tenant_id'] if not shared_es
else self.common_tenant)
l3p_owner = l3p_list[0]['tenant_id']
expected_calls = []
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
nat = self.nat_enabled
external_epg = APIC_EXTERNAL_EPG if self.pre_l3out else (
("default-%s" % es['id']) if nat else ep['id'])
ext_epg_tenant = (APIC_PRE_L3OUT_TENANT if self.pre_l3out else
owner)
es_name = es['name' if self.pre_l3out else 'id']
expected_calls.append(
mock.call(es_name,
("NAT-allow-%s" % es['id']) if nat else prov['id'],
external_epg=external_epg,
provided=True, owner=ext_epg_tenant,
transaction=mock.ANY))
expected_calls.append(
mock.call(es_name,
("NAT-allow-%s" % es['id']) if nat else cons['id'],
external_epg=external_epg,
provided=False, owner=ext_epg_tenant,
transaction=mock.ANY))
if nat:
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), prov['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=True, owner=l3p_owner,
transaction=mock.ANY))
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), cons['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=False, owner=l3p_owner,
transaction=mock.ANY))
self._check_call_list(expected_calls,
mgr.set_contract_for_external_epg.call_args_list)
# Although the naming convention used here has been chosen poorly,
# I'm separating the tests in order to get the mock re-set.
def test_create_with_prs_1(self):
self._test_create_with_prs(shared_es=True, shared_prs=True)
def test_create_with_prs_2(self):
self._test_create_with_prs(shared_es=True, shared_prs=False)
def test_create_with_prs_3(self):
self._test_create_with_prs(shared_es=False, shared_prs=False)
def test_create_with_prs_4(self):
self._test_create_with_prs(shared_es=False, shared_prs=True)
def test_create_with_prs_edge_nat_mode_1(self):
self._test_create_with_prs(shared_es=True, shared_prs=True,
is_edge_nat=True)
def test_create_with_prs_edge_nat_mode_2(self):
self._test_create_with_prs(shared_es=True, shared_prs=False,
is_edge_nat=True)
def test_create_with_prs_edge_nat_mode_3(self):
self._test_create_with_prs(shared_es=False, shared_prs=False,
is_edge_nat=True)
def test_create_with_prs_edge_nat_mode_4(self):
self._test_create_with_prs(shared_es=False, shared_prs=True,
is_edge_nat=True)
def _test_update_add_prs(self, shared_es, shared_prs, is_edge_nat=False):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
expected_res_status=201,
external_routes=[{
'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(3)]
l3p_list = []
for x in xrange(len(es_list)):
l3p = self.create_l3_policy(
shared=False,
tenant_id=shared_es and 'another' or es_list[x]['tenant_id'],
external_segments={es_list[x]['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
prov = self._create_policy_rule_set_on_shared(
shared=shared_prs, tenant_id=es_list[0]['tenant_id'] if not (
shared_es | shared_prs) else 'another')
cons = self._create_policy_rule_set_on_shared(
shared=shared_prs, tenant_id=es_list[0]['tenant_id'] if not (
shared_es | shared_prs) else 'another')
ep = self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[x['id'] for x in es_list],
tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
expected_res_status=201)['external_policy']
ep = self.update_external_policy(
ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
provided_policy_rule_sets={prov['id']: ''},
consumed_policy_rule_sets={cons['id']: ''})['external_policy']
mgr = self.driver.apic_manager
owner = (es_list[0]['tenant_id'] if not shared_es
else self.common_tenant)
l3p_owner = l3p_list[0]['tenant_id']
expected_calls = []
nat = self.nat_enabled
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
external_epg = APIC_EXTERNAL_EPG if self.pre_l3out else (
("default-%s" % es['id']) if nat else ep['id'])
ext_epg_tenant = (APIC_PRE_L3OUT_TENANT if self.pre_l3out else
owner)
es_name = es['name' if self.pre_l3out else 'id']
expected_calls.append(
mock.call(es_name,
("NAT-allow-%s" % es['id']) if nat else prov['id'],
external_epg=external_epg,
provided=True, owner=ext_epg_tenant,
transaction=mock.ANY))
expected_calls.append(
mock.call(es_name,
("NAT-allow-%s" % es['id']) if nat else cons['id'],
external_epg=external_epg,
provided=False, owner=ext_epg_tenant,
transaction=mock.ANY))
if nat:
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), prov['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=True, owner=l3p_owner,
transaction=mock.ANY))
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), cons['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=False, owner=l3p_owner,
transaction=mock.ANY))
self._check_call_list(expected_calls,
mgr.set_contract_for_external_epg.call_args_list)
ep = self.update_external_policy(
ep['id'], expected_res_status=200, provided_policy_rule_sets={},
consumed_policy_rule_sets={},
tenant_id=ep['tenant_id'])['external_policy']
expected_calls = []
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
if nat:
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), prov['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=True, owner=l3p_owner,
transaction=mock.ANY))
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), cons['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=False, owner=l3p_owner,
transaction=mock.ANY))
else:
external_epg = (APIC_EXTERNAL_EPG if self.pre_l3out
else ep['id'])
ext_epg_tenant = (APIC_PRE_L3OUT_TENANT if self.pre_l3out else
owner)
es_name = es['name' if self.pre_l3out else 'id']
expected_calls.append(
mock.call(es_name, prov['id'],
external_epg=external_epg,
provided=True, owner=ext_epg_tenant,
transaction=mock.ANY))
expected_calls.append(
mock.call(es_name, cons['id'],
external_epg=external_epg,
provided=False, owner=ext_epg_tenant,
transaction=mock.ANY))
self._check_call_list(
expected_calls, mgr.unset_contract_for_external_epg.call_args_list)
# Although the naming convention used here has been chosen poorly,
# I'm separating the tests in order to get the mock re-set.
def test_update_add_prs_1(self):
self._test_update_add_prs(shared_es=True, shared_prs=True)
def test_update_add_prs_2(self):
self._test_update_add_prs(shared_es=True, shared_prs=False)
def test_update_add_prs_3(self):
self._test_update_add_prs(shared_es=False, shared_prs=False)
def test_update_add_prs_4(self):
self._test_update_add_prs(shared_es=False, shared_prs=True)
def test_update_add_prs_edge_nat_mode_1(self):
self._test_update_add_prs(shared_es=True, shared_prs=True,
is_edge_nat=True)
def test_update_add_prs_edge_nat_mode_2(self):
self._test_update_add_prs(shared_es=True, shared_prs=False,
is_edge_nat=True)
def test_update_add_prs_edge_nat_mode_3(self):
self._test_update_add_prs(shared_es=False, shared_prs=False,
is_edge_nat=True)
def test_update_add_prs_edge_nat_mode_4(self):
self._test_update_add_prs(shared_es=False, shared_prs=True,
is_edge_nat=True)
def test_update_add_prs_unsupported(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='unsupported', cidr='192.168.0.0/24', expected_res_status=201,
external_routes=[{'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
prov = self._create_policy_rule_set_on_shared()
cons = self._create_policy_rule_set_on_shared()
ep = self.create_external_policy(
external_segments=[es['id']],
expected_res_status=201)['external_policy']
self.update_external_policy(
ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
provided_policy_rule_sets={prov['id']: ''},
consumed_policy_rule_sets={cons['id']: ''})['external_policy']
mgr = self.driver.apic_manager
self.assertFalse(mgr.set_contract_for_external_epg.called)
def _test_multi_policy_single_tenant(self, shared_es):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
expected_res_status=201, shared=shared_es,
external_routes=[{
'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
owner = 'another' if shared_es else es['tenant_id']
self.create_external_policy(
external_segments=[es['id']],
tenant_id=owner,
expected_res_status=201)
res = self.create_external_policy(
external_segments=[es['id']],
tenant_id=owner,
expected_res_status=400)
self.assertEqual('MultipleExternalPoliciesForL3Policy',
res['NeutronError']['type'])
# create another external policy and update it to use external-segment
ep2 = self.create_external_policy(
tenant_id=owner,
expected_res_status=201)['external_policy']
res = self.update_external_policy(
ep2['id'], external_segments=[es['id']],
tenant_id=owner,
expected_res_status=400)
self.assertEqual('MultipleExternalPoliciesForL3Policy',
res['NeutronError']['type'])
def test_multi_policy_single_tenant_1(self):
self._test_multi_policy_single_tenant(True)
def test_multi_policy_single_tenant_2(self):
self._test_multi_policy_single_tenant(False)
def test_multi_policy_multi_tenant(self):
tenants = (['tenant_a', 'tenant_b', 'tenant_c']
if self.nat_enabled else ['tenant_a'])
self._mock_external_dict([('supported', '192.168.0.2/24')])
ext_routes = ['128.0.0.0/24', '128.0.1.0/24']
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=True,
expected_res_status=201,
external_routes=[{
'destination': ext_routes[x],
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(2)]
l3p_list = []
for x in xrange(len(tenants)):
l3p = self.create_l3_policy(
shared=False,
tenant_id=tenants[x],
external_segments={x['id']: [] for x in es_list},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
# create external-policy
ep_list = []
mgr = self.driver.apic_manager
for x in range(len(tenants)):
ep = self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[e['id'] for e in es_list],
tenant_id=tenants[x],
expected_res_status=201)['external_policy']
ep_list.append(ep)
l3p = l3p_list[x]
expected_calls = []
for es in es_list:
if self.nat_enabled:
if not self.pre_l3out:
expected_calls.append(
mock.call(es['id'], subnet='0.0.0.0/0',
external_epg="default-%s" % es['id'],
owner=self.common_tenant,
transaction=mock.ANY))
expected_calls.append(
mock.call("Shd-%s-%s" % (l3p['id'], es['id']),
subnet=es['external_routes'][0]['destination'],
external_epg=("Shd-%s-%s" % (l3p['id'], ep['id'])),
owner=tenants[x],
transaction=mock.ANY))
elif not self.pre_l3out:
expected_calls.append(
mock.call(es['id'],
subnet=es['external_routes'][0]['destination'],
external_epg=ep['id'], owner=self.common_tenant,
transaction=mock.ANY))
self._check_call_list(expected_calls,
mgr.ensure_external_epg_created.call_args_list)
mgr.ensure_external_epg_created.reset_mock()
# delete external-policy
expected_calls = []
for x in range(len(tenants)):
ep = ep_list[x]
self.delete_external_policy(
ep['id'], tenant_id=ep['tenant_id'],
expected_res_status=webob.exc.HTTPNoContent.code)
l3p = l3p_list[x]
for es in es_list:
if self.nat_enabled:
expected_calls.append(
mock.call("Shd-%s-%s" % (l3p['id'], es['id']),
external_epg=("Shd-%s-%s" % (l3p['id'], ep['id'])),
owner=tenants[x]))
elif not self.pre_l3out:
expected_calls.append(
mock.call(es['id'], external_epg=ep['id'],
owner=self.common_tenant))
if self.nat_enabled and not self.pre_l3out:
for es in es_list:
expected_calls.append(
mock.call(es['id'], external_epg="default-%s" % es['id'],
owner=self.common_tenant))
self._check_call_list(expected_calls,
mgr.ensure_external_epg_deleted.call_args_list)
class TestExternalPolicyNoNat(TestExternalPolicy):
def setUp(self):
super(TestExternalPolicyNoNat, self).setUp(nat_enabled=False)
class TestExternalPolicyPreL3Out(TestExternalPolicy):
def setUp(self):
super(TestExternalPolicyPreL3Out, self).setUp(
pre_existing_l3out=True)
def test_multi_tenant_delete(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=True,
expected_res_status=201)['external_segment']
ep_list = [
self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[es['id']],
tenant_id=tnnt,
expected_res_status=201)['external_policy']
for tnnt in ['tenant_a', 'tenant_b', 'tenant_c']]
for ep in ep_list:
self.delete_external_policy(
ep['id'], tenant_id=ep['tenant_id'],
expected_res_status=webob.exc.HTTPNoContent.code)
nat_contract = "NAT-allow-%s" % es['id']
expected_calls = [
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG,
provided=True, owner=APIC_PRE_L3OUT_TENANT,
transaction=mock.ANY),
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG,
provided=False, owner=APIC_PRE_L3OUT_TENANT,
transaction=mock.ANY)
]
mgr = self.driver.apic_manager
self._check_call_list(expected_calls,
mgr.unset_contract_for_external_epg.call_args_list)
def test_multi_tenant_update_dissociate(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=True,
expected_res_status=201)['external_segment']
ep_list = [
self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[es['id']],
tenant_id=tnnt,
expected_res_status=201)['external_policy']
for tnnt in ['tenant_a', 'tenant_b', 'tenant_c']]
for ep in ep_list:
self.update_external_policy(
ep['id'], tenant_id=ep['tenant_id'],
external_segments=[],
expected_res_status=200)
nat_contract = "NAT-allow-%s" % es['id']
expected_calls = [
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG,
provided=True, owner=APIC_PRE_L3OUT_TENANT,
transaction=mock.ANY),
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG,
provided=False, owner=APIC_PRE_L3OUT_TENANT,
transaction=mock.ANY)
]
mgr = self.driver.apic_manager
self._check_call_list(expected_calls,
mgr.unset_contract_for_external_epg.call_args_list)
class TestExternalPolicyNoNatPreL3Out(TestExternalPolicy):
def setUp(self):
super(TestExternalPolicyNoNatPreL3Out, self).setUp(
nat_enabled=False, pre_existing_l3out=True)
class TestNatPool(ApicMappingTestCase):
def test_overlap_nat_pool_create(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
mgr = self.driver.apic_manager
mgr.ext_net_dict['supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
expected_res_status=webob.exc.HTTPCreated.code)['external_segment']
# cidr_exposed overlap
res = self.create_nat_pool(
external_segment_id=es['id'],
ip_version=4, ip_pool='192.168.0.0/24',
expected_res_status=webob.exc.HTTPBadRequest.code)
self.assertEqual('NatPoolOverlapsApicSubnet',
res['NeutronError']['type'])
# host-pool overlap
res = self.create_nat_pool(
external_segment_id=es['id'],
ip_version=4, ip_pool='192.168.200.0/24',
expected_res_status=webob.exc.HTTPBadRequest.code)
self.assertEqual('NatPoolOverlapsApicSubnet',
res['NeutronError']['type'])
def test_overlap_nat_pool_update(self):
self._mock_external_dict([('supported', '192.168.0.2/24'),
('supported1', '192.168.1.2/24')])
es1 = self.create_external_segment(name='supported',
expected_res_status=webob.exc.HTTPCreated.code)['external_segment']
es2 = self.create_external_segment(name='supported1',
expected_res_status=webob.exc.HTTPCreated.code)['external_segment']
nat_pool = self.create_nat_pool(
external_segment_id=es1['id'],
ip_version=4, ip_pool='192.168.1.0/24',
expected_res_status=webob.exc.HTTPCreated.code)['nat_pool']
res = self.update_nat_pool(nat_pool['id'],
external_segment_id=es2['id'],
expected_res_status=webob.exc.HTTPBadRequest.code)
self.assertEqual('NatPoolOverlapsApicSubnet',
res['NeutronError']['type'])
def _test_nat_bd_subnet_created_deleted(self, shared, is_edge_nat=False):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es = self.create_external_segment(name='supported',
expected_res_status=webob.exc.HTTPCreated.code,
shared=shared)['external_segment']
nat_pool = self.create_nat_pool(
external_segment_id=es['id'],
ip_version=4, ip_pool='192.168.1.0/24', shared=shared,
expected_res_status=webob.exc.HTTPCreated.code)['nat_pool']
owner = es['tenant_id'] if not shared else self.common_tenant
mgr = self.driver.apic_manager
if self.nat_enabled and not is_edge_nat:
mgr.ensure_subnet_created_on_apic.assert_called_with(
owner, "NAT-bd-%s" % es['id'], '192.168.1.1/24')
else:
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
self.delete_nat_pool(nat_pool['id'],
expected_res_status=webob.exc.HTTPNoContent.code)
if self.nat_enabled and not is_edge_nat:
mgr.ensure_subnet_deleted_on_apic.assert_called_with(
owner, "NAT-bd-%s" % es['id'], '192.168.1.1/24')
else:
self.assertFalse(mgr.ensure_subnet_deleted_on_apic.called)
def test_nat_bd_subnet_create_delete_unshared(self):
self._test_nat_bd_subnet_created_deleted(False)
def test_nat_bd_subnet_create_delete_shared(self):
self._test_nat_bd_subnet_created_deleted(True)
def test_nat_bd_subnet_create_delete_unshared_edge_nat(self):
self._test_nat_bd_subnet_created_deleted(False, is_edge_nat=True)
def test_nat_bd_subnet_create_delete_shared_edge_nat(self):
self._test_nat_bd_subnet_created_deleted(True, is_edge_nat=True)
def _test_nat_bd_subnet_updated(self, shared, is_edge_nat=False):
self._mock_external_dict([('supported', '192.168.0.2/24'),
('supported1', '192.168.10.2/24')],
is_edge_nat)
es1 = self.create_external_segment(name='supported',
expected_res_status=webob.exc.HTTPCreated.code,
shared=shared)['external_segment']
es2 = self.create_external_segment(name='supported1',
expected_res_status=webob.exc.HTTPCreated.code,
shared=shared)['external_segment']
nat_pool = self.create_nat_pool(
external_segment_id=es1['id'],
ip_version=4, ip_pool='192.168.1.0/24', shared=shared,
expected_res_status=webob.exc.HTTPCreated.code)['nat_pool']
owner = es1['tenant_id'] if not shared else self.common_tenant
mgr = self.driver.apic_manager
mgr.ensure_subnet_created_on_apic.reset_mock()
nat_pool = self.update_nat_pool(nat_pool['id'],
external_segment_id=es2['id'],
expected_res_status=webob.exc.HTTPOk.code)['nat_pool']
if self.nat_enabled and not is_edge_nat:
mgr.ensure_subnet_deleted_on_apic.assert_called_with(
owner, "NAT-bd-%s" % es1['id'], '192.168.1.1/24')
mgr.ensure_subnet_created_on_apic.assert_called_with(
owner, "NAT-bd-%s" % es2['id'], '192.168.1.1/24')
else:
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
self.assertFalse(mgr.ensure_subnet_deleted_on_apic.called)
def test_nat_bd_subnet_update_unshared(self):
self._test_nat_bd_subnet_updated(False)
def test_nat_bd_subnet_update_shared(self):
self._test_nat_bd_subnet_updated(True)
def test_nat_bd_subnet_update_unshared_edge_nat(self):
self._test_nat_bd_subnet_updated(False, is_edge_nat=True)
def test_nat_bd_subnet_update_shared_edge_nat(self):
self._test_nat_bd_subnet_updated(True, is_edge_nat=True)
def _test_create_fip(self, shared):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(name='supported',
expected_res_status=webob.exc.HTTPCreated.code,
shared=shared)['external_segment']
self.create_nat_pool(external_segment_id=es['id'],
ip_version=4, ip_pool='192.168.1.0/24', shared=shared,
expected_res_status=webob.exc.HTTPCreated.code)
subnet = self._get_object('subnets', es['subnet_id'],
self.api)['subnet']
fip_dict = {'floating_network_id': subnet['network_id']}
fip = self.l3plugin.create_floatingip(
context.get_admin_context(),
{'floatingip': fip_dict,
'tenant_id': es['tenant_id']})
self.assertIsNotNone(fip)
self.assertTrue(
netaddr.IPAddress(fip['floating_ip_address']) in
netaddr.IPNetwork('192.168.1.0/24'))
def test_create_fip(self):
self._test_create_fip(False)
def test_create_fip_shared(self):
self._test_create_fip(True)
class TestNatPoolNoNat(TestNatPool):
def setUp(self):
super(TestNatPoolNoNat, self).setUp(nat_enabled=False)
| 242,267 | 965 | 7,649 |
87484e3ece20f96eee0532619677e15accd1c4e4 | 4,760 | py | Python | ResourceMonitor.py | Bot-7037/Resource-Monitor | 44c96606784d6138bbfbc0fd8254252bb676dbfc | [
"MIT"
] | 1 | 2021-11-21T05:26:06.000Z | 2021-11-21T05:26:06.000Z | ResourceMonitor.py | Bot-7037/Resource-Monitor | 44c96606784d6138bbfbc0fd8254252bb676dbfc | [
"MIT"
] | null | null | null | ResourceMonitor.py | Bot-7037/Resource-Monitor | 44c96606784d6138bbfbc0fd8254252bb676dbfc | [
"MIT"
] | null | null | null | import psutil
import pandas as pd
from datetime import datetime
from termcolor import colored
import GetInfo
import Notify
import time
import os
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--columns", default="name,cpu_usage,memory_usage,read_bytes,write_bytes,status,create_time,n_threads")
parser.add_argument("-s", "--sort-by", dest="sort_by",default="memory_usage")
parser.add_argument("--ascending", action="store_true")
parser.add_argument("-n", default=20)
parser.add_argument("-u", "--live-update", action="store_true")
parser.add_argument("--kill", dest="process_to_close")
parser.add_argument("--after", dest="duration", default=0)
args = parser.parse_args()
columns = args.columns
sort_by = args.sort_by
descending = args.ascending
n = int(args.n)
live_update = args.live_update
kill = args.process_to_close
duration = int(args.duration)
# Fix terminal size
if 'nt' in os.name:
while(1):
(width, height) = os.get_terminal_size()
if(int(height) < 33 or int(width)<120):
print(colored("Terminal size too small. Resize the terminal", 'red', attrs=['bold']))
else:
break
time.sleep(0.5)
os.system("cls")
else:
while(1):
height, width = os.popen('stty size', 'r').read().split()
if(int(height) < 33 or int(width)<120):
print(colored("Terminal size too small. Resize the terminal", 'red', attrs=['bold']))
else:
break
time.sleep(0.5)
os.system("clear")
processes = GetInfo.get_processes_info()
df = construct_dataframe(processes)
print_header()
if n == 0:
print(df.to_string())
elif n > 0:
print(df.head(n).to_string())
draw_graph()
while live_update:
processes = GetInfo.get_processes_info()
df = construct_dataframe(processes)
os.system("cls") if "nt" in os.name else os.system("clear")
print_header()
if n == 0:
print(colored(df.to_string(), 'red','on_white'))
elif n > 0:
print(colored(df.head(n).to_string(), 'red','on_white'))
draw_graph()
time.sleep(1)
if(kill):
kill_process(df.head(n).to_string(), kill, duration*60)
| 35 | 134 | 0.575 | import psutil
import pandas as pd
from datetime import datetime
from termcolor import colored
import GetInfo
import Notify
import time
import os
def print_header():
print("╔"+"═"*117,end="╗\n║")
print(colored("\t\t\t\t\t\t[= RESOURCE MONITOR =]\t\t\t\t\t\t ", "cyan", attrs=['bold']),end="║\n")
print("╚"+"═"*117+"╝")
def construct_dataframe(processes):
df = pd.DataFrame(processes)
df.set_index('pid', inplace=True)
df.sort_values(sort_by, inplace=True, ascending=descending)
df['memory_usage'] = df['memory_usage'].apply(get_size)
df['write_bytes'] = df['write_bytes'].apply(get_size)
df['read_bytes'] = df['read_bytes'].apply(get_size)
df['create_time'] = df['create_time'].apply(datetime.strftime, args=("%Y-%m-%d %H:%M:%S",)) # Correcting formats
df = df[columns.split(",")]
return df
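# Editor's sketch of the input construct_dataframe expects (hypothetical record):
# [{'pid': 1234, 'name': 'python', 'cpu_usage': 1.2, 'memory_usage': 52428800,
#   'read_bytes': 0, 'write_bytes': 0, 'status': 'running',
#   'create_time': datetime.now(), 'n_threads': 4}, ...]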
def get_size(bytes):
for unit in ['', 'K', 'M', 'G', 'T', 'P']:
if bytes < 1024:
return f"{bytes:.2f}{unit}B"
bytes /= 1024
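# Illustration: get_size(1536) -> '1.50KB'; get_size(3 * 1024**2) -> '3.00MB'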
def kill_process(df, process, duration):
os.system("cls") if "nt" in os.name else os.system("clear")
while(duration > 0):
print(df)
print(f"Will close {process} in {duration} seconds")
duration-=1
time.sleep(1)
os.system("cls") if "nt" in os.name else os.system("clear")
if(duration == 60):
Notify.Notify("Attention", "Closing the {process} in a minute", 5)
duration -=5
for proc in psutil.process_iter():
if proc.name() == process:
proc.kill()
def draw_graph():
print("\n╔"+"═"*117,end="╗\n║")
# Print CPU Graph
cpu_usage = df['cpu_usage'].sum()
if(cpu_usage>100): cpu_usage=100
if(cpu_usage<1): cpu_usage=1
text = "CPU Usage\t"+"█"*int(cpu_usage) + int(100-cpu_usage+2)*" "
print(colored(text, "magenta", attrs=['bold']),end=" ║\n║")
#Print Memory graph
RAM = round(psutil.virtual_memory().total / (1024.0 **2))
    def get_number(x):
        # convert get_size() output back to MB (previously 'GB' was divided by 1024)
        if 'MB' in x: return float(x[:-2])
        if 'GB' in x: return float(x[:-2]) * 1024
        return float(x[:-2]) / 1024  # 'KB' (or plain bytes)
RAM_usage = df['memory_usage'].apply(get_number)
RAM_usage = (RAM_usage.sum())*100
text = "Memory Usage \t"+"█"*int(RAM_usage/ RAM) + int(100-int(RAM_usage/ RAM)+2)*" "
print(colored(text, "green", attrs=['bold']),end="║\n")
print("╚"+"═"*117+"╝")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--columns", default="name,cpu_usage,memory_usage,read_bytes,write_bytes,status,create_time,n_threads")
parser.add_argument("-s", "--sort-by", dest="sort_by",default="memory_usage")
parser.add_argument("--ascending", action="store_true")
parser.add_argument("-n", default=20)
parser.add_argument("-u", "--live-update", action="store_true")
parser.add_argument("--kill", dest="process_to_close")
parser.add_argument("--after", dest="duration", default=0)
args = parser.parse_args()
columns = args.columns
sort_by = args.sort_by
descending = args.ascending
n = int(args.n)
live_update = args.live_update
kill = args.process_to_close
duration = int(args.duration)
# Fix terminal size
if 'nt' in os.name:
while(1):
(width, height) = os.get_terminal_size()
if(int(height) < 33 or int(width)<120):
print(colored("Terminal size too small. Resize the terminal", 'red', attrs=['bold']))
else:
break
time.sleep(0.5)
os.system("cls")
else:
while(1):
height, width = os.popen('stty size', 'r').read().split()
if(int(height) < 33 or int(width)<120):
print(colored("Terminal size too small. Resize the terminal", 'red', attrs=['bold']))
else:
break
time.sleep(0.5)
os.system("clear")
processes = GetInfo.get_processes_info()
df = construct_dataframe(processes)
print_header()
if n == 0:
print(df.to_string())
elif n > 0:
print(df.head(n).to_string())
draw_graph()
while live_update:
processes = GetInfo.get_processes_info()
df = construct_dataframe(processes)
os.system("cls") if "nt" in os.name else os.system("clear")
print_header()
if n == 0:
print(colored(df.to_string(), 'red','on_white'))
elif n > 0:
print(colored(df.head(n).to_string(), 'red','on_white'))
draw_graph()
time.sleep(1)
if(kill):
kill_process(df.head(n).to_string(), kill, duration*60)
| 2,165 | 0 | 125 |
8d272e209965eda5ff24423747a5f6f4591b2b0c | 1,369 | py | Python | shutdown.py | liusl104/py_sync_binlog | 33a67f545159767d38a522d28d2f79b3ac3802ca | [
"Apache-2.0"
] | 3 | 2018-09-18T03:29:33.000Z | 2020-01-13T03:34:39.000Z | shutdown.py | liusl104/py_sync_binlog | 33a67f545159767d38a522d28d2f79b3ac3802ca | [
"Apache-2.0"
] | null | null | null | shutdown.py | liusl104/py_sync_binlog | 33a67f545159767d38a522d28d2f79b3ac3802ca | [
"Apache-2.0"
] | 1 | 2022-01-25T09:39:17.000Z | 2022-01-25T09:39:17.000Z | # encoding=utf8
import os
import socket
from sync_binlog.output_log import logger as loging
import time
import sys
from sync_binlog.update_post import update_datetime
try:
import psutil
except ImportError:
print("psutil 模块不存在,请使用 pip install psutil 安装")
sys.exit(0)
# Shutdown complete
if __name__ == "__main__":
print("%sStarting shutdown..." % update_datetime())
shutdown_program()
time.sleep(3)
process_id = judgeprocess('startup.py')
if process_id is not False:
psutil.Process(process_id).kill()
print("%sShutdown complete" % update_datetime())
loging.info("Shutdown complete")
else:
print("%s程序自动关闭,请手工检查" % update_datetime())
loging.info("程序自动关闭,请手工检查")
| 24.890909 | 63 | 0.637692 | # encoding=utf8
import os
import socket
from sync_binlog.output_log import logger as loging
import time
import sys
from sync_binlog.update_post import update_datetime
try:
import psutil
except ImportError:
print("psutil 模块不存在,请使用 pip install psutil 安装")
sys.exit(0)
def shutdown_program():
hostname = socket.gethostname()
if os.path.exists('%s.pid' % hostname):
os.remove('%s.pid' % hostname)
loging.info("Starting shutdown...")
else:
print('%s%s.pid 文件不存在' % (update_datetime(), hostname))
loging.warn('%s.pid 文件不存在' % hostname)
def judgeprocess(processname):
pl = psutil.pids()
for pid in pl:
try:
cmdlines = psutil.Process(pid).cmdline()
except Exception:
continue
for cmdline in cmdlines:
if processname in cmdline:
return pid
else:
return False
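# Illustration (assuming a matching process exists): judgeprocess('startup.py')
# returns that process's pid; once no match remains it returns False.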
# Shutdown complete
if __name__ == "__main__":
print("%sStarting shutdown..." % update_datetime())
shutdown_program()
time.sleep(3)
process_id = judgeprocess('startup.py')
if process_id is not False:
psutil.Process(process_id).kill()
print("%sShutdown complete" % update_datetime())
loging.info("Shutdown complete")
else:
print("%s程序自动关闭,请手工检查" % update_datetime())
loging.info("程序自动关闭,请手工检查")
| 601 | 0 | 46 |
e1c34e1f2ca887b5b7509177103c082a02ee4201 | 337 | py | Python | example/test/T7_duocaiyinyuebang.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | 2 | 2021-12-18T06:34:26.000Z | 2022-01-05T05:08:47.000Z | example/test/T8_duocaiyinyuebang.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | null | null | null | example/test/T8_duocaiyinyuebang.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | null | null | null | import turtle
turtle.mode("logo")
turtle.shape("turtle")
turtle.bgcolor("black")
turtle.hideturtle()
turtle.pensize(12)
turtle.colormode(255)
s = 50
a = 0
for i in range(10):
turtle.pencolor(200-a, a, 100)
turtle.pu()
turtle.goto(25*i, 0)
turtle.pd()
turtle.forward(s)
a = a + 20
s = s + 10
turtle.done()
| 14.652174 | 34 | 0.62908 | import turtle
turtle.mode("logo")
turtle.shape("turtle")
turtle.bgcolor("black")
turtle.hideturtle()
turtle.pensize(12)
turtle.colormode(255)
s = 50
a = 0
for i in range(10):
turtle.pencolor(200-a, a, 100)
turtle.pu()
turtle.goto(25*i, 0)
turtle.pd()
turtle.forward(s)
a = a + 20
s = s + 10
turtle.done()
| 0 | 0 | 0 |
d73f56d089f03fc6f77ef649bdcfbef43da2b862 | 1,144 | py | Python | docs/scripts/cluster_add_k8s.py | pramaku/hpecp-python-library | 55550a1e27259a3132ea0608e66719e9732fb081 | [
"Apache-2.0"
] | null | null | null | docs/scripts/cluster_add_k8s.py | pramaku/hpecp-python-library | 55550a1e27259a3132ea0608e66719e9732fb081 | [
"Apache-2.0"
] | null | null | null | docs/scripts/cluster_add_k8s.py | pramaku/hpecp-python-library | 55550a1e27259a3132ea0608e66719e9732fb081 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from hpecp import ContainerPlatformClient, APIException
from hpecp.k8s_cluster import K8sClusterHostConfig
import textwrap
client = ContainerPlatformClient(username='admin',
password='admin123',
api_host='127.0.0.1',
api_port=8080,
use_ssl=True,
verify_ssl='/certs/hpecp-ca-cert.pem')
client.create_session()
print( client.k8s_worker.get_k8shosts().tabulate() )
try:
k8shosts_config=[
K8sClusterHostConfig(4, 'worker'),
K8sClusterHostConfig(5, 'master')
]
k8s_cluster_id = client.k8s_cluster.create(name='def', description='my cluster', k8s_version='1.17.0', k8shosts_config=k8shosts_config)
print('creating cluster id: ' + k8s_cluster_id)
except APIException as e:
text = """APIException(
Backend API Response -> {}
HTTP Method -> {}
Request URL -> {}
Request Data -> [{}]
)"""
print( textwrap.dedent(text).format(e.message, e.request_method, e.request_url, e.request_data) ) | 35.75 | 139 | 0.604021 | #!/usr/bin/env python3
from hpecp import ContainerPlatformClient, APIException
from hpecp.k8s_cluster import K8sClusterHostConfig
import textwrap
client = ContainerPlatformClient(username='admin',
password='admin123',
api_host='127.0.0.1',
api_port=8080,
use_ssl=True,
verify_ssl='/certs/hpecp-ca-cert.pem')
client.create_session()
print( client.k8s_worker.get_k8shosts().tabulate() )
try:
k8shosts_config=[
K8sClusterHostConfig(4, 'worker'),
K8sClusterHostConfig(5, 'master')
]
k8s_cluster_id = client.k8s_cluster.create(name='def', description='my cluster', k8s_version='1.17.0', k8shosts_config=k8shosts_config)
print('creating cluster id: ' + k8s_cluster_id)
except APIException as e:
text = """APIException(
Backend API Response -> {}
HTTP Method -> {}
Request URL -> {}
Request Data -> [{}]
)"""
print( textwrap.dedent(text).format(e.message, e.request_method, e.request_url, e.request_data) ) | 0 | 0 | 0 |
ff3dcdc0b12675c40e2b4e49025d944716d7d7ae | 1,195 | py | Python | datasets/data_splitter.py | AjayMudhai/pytorch-CycleGAN-and-pix2pix | 64fcf0b926e2125042a559b0fb6a4a57559923c2 | [
"BSD-3-Clause"
] | null | null | null | datasets/data_splitter.py | AjayMudhai/pytorch-CycleGAN-and-pix2pix | 64fcf0b926e2125042a559b0fb6a4a57559923c2 | [
"BSD-3-Clause"
] | null | null | null | datasets/data_splitter.py | AjayMudhai/pytorch-CycleGAN-and-pix2pix | 64fcf0b926e2125042a559b0fb6a4a57559923c2 | [
"BSD-3-Clause"
] | null | null | null | import os
import shutil
# wbg_pth='/datadrive/Reflection/training_data/wbg'
# img_pth='/datadrive/Reflection/training_data/images'
# dst_pth='/datadrive/Reflection/training_data/valB'
# move_data(wbg_pth,img_pth,dst_pth)
wbg_pth='/datadrive/Reflection/training_data/wbg'
trainA_pth='/datadrive/pytorch-CycleGAN-and-pix2pix/datasets/cars/trainA'
imgs_pth='/datadrive/Reflection/training_data/images'
trainB_pth='/datadrive/pytorch-CycleGAN-and-pix2pix/datasets/cars/trainB'
move_data(wbg_pth,trainA_pth,imgs_pth,trainB_pth) | 35.147059 | 73 | 0.702092 | import os
import shutil
def move_data(wbg_pth,img_pth,dst_pth):
for root,dirs,files in os.walk(wbg_pth):
for file in files:
op=os.path.join(img_pth,file)
nnp=os.path.join(dst_pth,file)
shutil.move(op,nnp)
# wbg_pth='/datadrive/Reflection/training_data/wbg'
# img_pth='/datadrive/Reflection/training_data/images'
# dst_pth='/datadrive/Reflection/training_data/valB'
# move_data(wbg_pth,img_pth,dst_pth)
# NOTE: this redefinition shadows the 3-argument move_data above; only this
# 4-argument version exists at runtime (the commented 3-argument call would fail).
def move_data(wbg_pth,trainA_pth,imgs_pth,trainB_pth):
for root,dirs,files in os.walk(wbg_pth):
for file in files:
ta_op=os.path.join(root,file)
ta_nnp=os.path.join(trainA_pth,file)
tb_op=os.path.join(imgs_pth,file)
tb_nnp=os.path.join(trainB_pth,file)
if os.path.exists(tb_op):
shutil.move(ta_op,ta_nnp)
shutil.move(tb_op,tb_nnp)
wbg_pth='/datadrive/Reflection/training_data/wbg'
trainA_pth='/datadrive/pytorch-CycleGAN-and-pix2pix/datasets/cars/trainA'
imgs_pth='/datadrive/Reflection/training_data/images'
trainB_pth='/datadrive/pytorch-CycleGAN-and-pix2pix/datasets/cars/trainB'
move_data(wbg_pth,trainA_pth,imgs_pth,trainB_pth) | 620 | 0 | 46 |
5679b81204b649dc0cd086786518ea8025d21ff4 | 2,711 | py | Python | venv/lib/python3.8/site-packages/azureml/_base_sdk_common/field_info.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azureml/_base_sdk_common/field_info.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azureml/_base_sdk_common/field_info.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""A class for storing the field information."""
class _FieldInfo(object):
"""A class for storing the field information."""
def __init__(self, field_type, documentation, list_element_type=None, user_keys=False, serialized_name=None,
exclude_if_none=False):
"""Class FieldInfo constructor.
:param field_type: The data type of field.
:type field_type: object
:param documentation: The field information
:type documentation: str
:param list_element_type: The type of list element.
:type list_element_type: object
:param user_keys: user_keys=True, if keys in the value of the field are user keys.
user keys are not case normalized.
:type user_keys: bool
:param serialized_name:
:type serialized_name: str
:param exclude_if_none: Exclude from serialized output if value is None.
:type exclude_if_none: bool
"""
self._field_type = field_type
self._documentation = documentation
self._list_element_type = list_element_type
self._user_keys = user_keys
self._serialized_name = serialized_name
self._exclude_if_none = exclude_if_none
@property
def field_type(self):
"""Get field type.
:return: Returns the field type.
:rtype: object
"""
return self._field_type
@property
def documentation(self):
"""Return documentation.
:return: Returns the documentation.
:rtype: str
"""
return self._documentation
@property
def list_element_type(self):
"""Get list element type.
:return: Returns the list element type.
:rtype: object
"""
return self._list_element_type
@property
def user_keys(self):
"""Get user keys setting.
:return: Returns the user keys setting.
:rtype: bool
"""
return self._user_keys
@property
def serialized_name(self):
"""Get serialized name.
:return: Returns the serialized name.
:rtype: str
"""
return self._serialized_name
@property
def exclude_if_none(self):
"""Get whether to exclude None from serialized output.
:return: Returns whether to exclude None form serialized output.
:rtype: bool
"""
return self._exclude_if_none
| 30.806818 | 113 | 0.57986 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""A class for storing the field information."""
class _FieldInfo(object):
"""A class for storing the field information."""
def __init__(self, field_type, documentation, list_element_type=None, user_keys=False, serialized_name=None,
exclude_if_none=False):
"""Class FieldInfo constructor.
:param field_type: The data type of field.
:type field_type: object
:param documentation: The field information
:type documentation: str
:param list_element_type: The type of list element.
:type list_element_type: object
:param user_keys: user_keys=True, if keys in the value of the field are user keys.
user keys are not case normalized.
:type user_keys: bool
:param serialized_name:
:type serialized_name: str
:param exclude_if_none: Exclude from serialized output if value is None.
:type exclude_if_none: bool
"""
self._field_type = field_type
self._documentation = documentation
self._list_element_type = list_element_type
self._user_keys = user_keys
self._serialized_name = serialized_name
self._exclude_if_none = exclude_if_none
@property
def field_type(self):
"""Get field type.
:return: Returns the field type.
:rtype: object
"""
return self._field_type
@property
def documentation(self):
"""Return documentation.
:return: Returns the documentation.
:rtype: str
"""
return self._documentation
@property
def list_element_type(self):
"""Get list element type.
:return: Returns the list element type.
:rtype: object
"""
return self._list_element_type
@property
def user_keys(self):
"""Get user keys setting.
:return: Returns the user keys setting.
:rtype: bool
"""
return self._user_keys
@property
def serialized_name(self):
"""Get serialized name.
:return: Returns the serialized name.
:rtype: str
"""
return self._serialized_name
@property
def exclude_if_none(self):
"""Get whether to exclude None from serialized output.
:return: Returns whether to exclude None form serialized output.
:rtype: bool
"""
return self._exclude_if_none
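# Editor's sketch, a hypothetical field definition using this class:
#   name_field = _FieldInfo(str, "Resource display name.",
#                           serialized_name="displayName", exclude_if_none=True)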
| 0 | 0 | 0 |
6f3cd2cc7dc0d6471de7c34578d22cf3a32749f4 | 3,440 | py | Python | GmailWrapper_JE/venv/Lib/site-packages/google/auth/crypt/__init__.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | GmailWrapper_JE/venv/Lib/site-packages/google/auth/crypt/__init__.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | GmailWrapper_JE/venv/Lib/site-packages/google/auth/crypt/__init__.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cryptography helpers for verifying and signing messages.
The simplest way to verify signatures is using :func:`verify_signature`::
cert = open('certs.pem').read()
valid = crypt.verify_signature(message, signature, cert)
If you're going to verify many messages with the same certificate, you can use
:class:`RSAVerifier`::
cert = open('certs.pem').read()
verifier = crypt.RSAVerifier.from_string(cert)
valid = verifier.verify(message, signature)
To sign messages use :class:`RSASigner` with a private key::
private_key = open('private_key.pem').read()
signer = crypt.RSASigner.from_string(private_key)
signature = signer.sign(message)
The code above also works for :class:`ES256Signer` and :class:`ES256Verifier`.
Note that these two classes are only available if your `cryptography` dependency
version is at least 1.4.0.
"""
import six
from google.auth.crypt import base
from google.auth.crypt import rsa
try:
from google.auth.crypt import es256
except ImportError: # pragma: NO COVER
es256 = None
if es256 is not None: # pragma: NO COVER
__all__ = [
"ES256Signer",
"ES256Verifier",
"RSASigner",
"RSAVerifier",
"Signer",
"Verifier",
]
else: # pragma: NO COVER
__all__ = ["RSASigner", "RSAVerifier", "Signer", "Verifier"]
# Aliases to maintain the v1.0.0 interface, as the crypt module was split
# into submodules.
Signer = base.Signer
Verifier = base.Verifier
RSASigner = rsa.RSASigner
RSAVerifier = rsa.RSAVerifier
if es256 is not None: # pragma: NO COVER
ES256Signer = es256.ES256Signer
ES256Verifier = es256.ES256Verifier
def verify_signature(message, signature, certs, verifier_cls=rsa.RSAVerifier):
"""Verify an RSA or ECDSA cryptographic signature.
Checks that the provided ``signature`` was generated from ``bytes`` using
the private key associated with the ``cert``.
Args:
message (Union[str, bytes]): The plaintext message.
signature (Union[str, bytes]): The cryptographic signature to check.
certs (Union[Sequence, str, bytes]): The certificate or certificates
to use to check the signature.
verifier_cls (Optional[~google.auth.crypt.base.Signer]): Which verifier
class to use for verification. This can be used to select different
algorithms, such as RSA or ECDSA. Default value is :class:`RSAVerifier`.
Returns:
bool: True if the signature is valid, otherwise False.
"""
if isinstance(certs, (six.text_type, six.binary_type)):
certs = [certs]
for cert in certs:
verifier = verifier_cls.from_string(cert)
if verifier.verify(message, signature):
return True
return False
| 34.059406 | 85 | 0.684302 | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cryptography helpers for verifying and signing messages.
The simplest way to verify signatures is using :func:`verify_signature`::
cert = open('certs.pem').read()
valid = crypt.verify_signature(message, signature, cert)
If you're going to verify many messages with the same certificate, you can use
:class:`RSAVerifier`::
cert = open('certs.pem').read()
verifier = crypt.RSAVerifier.from_string(cert)
valid = verifier.verify(message, signature)
To sign messages use :class:`RSASigner` with a private key::
private_key = open('private_key.pem').read()
signer = crypt.RSASigner.from_string(private_key)
signature = signer.sign(message)
The code above also works for :class:`ES256Signer` and :class:`ES256Verifier`.
Note that these two classes are only available if your `cryptography` dependency
version is at least 1.4.0.
"""
import six
from google.auth.crypt import base
from google.auth.crypt import rsa
try:
from google.auth.crypt import es256
except ImportError: # pragma: NO COVER
es256 = None
if es256 is not None: # pragma: NO COVER
__all__ = [
"ES256Signer",
"ES256Verifier",
"RSASigner",
"RSAVerifier",
"Signer",
"Verifier",
]
else: # pragma: NO COVER
__all__ = ["RSASigner", "RSAVerifier", "Signer", "Verifier"]
# Aliases to maintain the v1.0.0 interface, as the crypt module was split
# into submodules.
Signer = base.Signer
Verifier = base.Verifier
RSASigner = rsa.RSASigner
RSAVerifier = rsa.RSAVerifier
if es256 is not None: # pragma: NO COVER
ES256Signer = es256.ES256Signer
ES256Verifier = es256.ES256Verifier
def verify_signature(message, signature, certs, verifier_cls=rsa.RSAVerifier):
"""Verify an RSA or ECDSA cryptographic signature.
Checks that the provided ``signature`` was generated from ``bytes`` using
the private key associated with the ``cert``.
Args:
message (Union[str, bytes]): The plaintext message.
signature (Union[str, bytes]): The cryptographic signature to check.
certs (Union[Sequence, str, bytes]): The certificate or certificates
to use to check the signature.
verifier_cls (Optional[~google.auth.crypt.base.Signer]): Which verifier
class to use for verification. This can be used to select different
algorithms, such as RSA or ECDSA. Default value is :class:`RSAVerifier`.
Returns:
bool: True if the signature is valid, otherwise False.
"""
if isinstance(certs, (six.text_type, six.binary_type)):
certs = [certs]
for cert in certs:
verifier = verifier_cls.from_string(cert)
if verifier.verify(message, signature):
return True
return False
| 0 | 0 | 0 |
139954145bee80fddd7c595da934e0de4728e34e | 84 | py | Python | hw7/ch16/automate_online-materials/sameNameError.py | JWiliams/csc221 | 0653dcb5f185e8517be9146e17b580f62d4930e6 | [
"CC0-1.0"
] | null | null | null | hw7/ch16/automate_online-materials/sameNameError.py | JWiliams/csc221 | 0653dcb5f185e8517be9146e17b580f62d4930e6 | [
"CC0-1.0"
] | null | null | null | hw7/ch16/automate_online-materials/sameNameError.py | JWiliams/csc221 | 0653dcb5f185e8517be9146e17b580f62d4930e6 | [
"CC0-1.0"
] | null | null | null |
eggs = 'global'
spam() | 14 | 24 | 0.571429 | def spam():
print(eggs) # ERROR!
eggs = 'spam local'
eggs = 'global'
spam() | 39 | 0 | 22 |
49e036fce5023369b407b2429dceb9099d47e801 | 1,209 | py | Python | scripts/release_helper/utils.py | DavidZeLiang/azure-sdk-for-python | b343247adc7c3c7ff52d6eadeca6b57eb0a23047 | [
"MIT"
] | null | null | null | scripts/release_helper/utils.py | DavidZeLiang/azure-sdk-for-python | b343247adc7c3c7ff52d6eadeca6b57eb0a23047 | [
"MIT"
] | null | null | null | scripts/release_helper/utils.py | DavidZeLiang/azure-sdk-for-python | b343247adc7c3c7ff52d6eadeca6b57eb0a23047 | [
"MIT"
] | null | null | null | from github.Issue import Issue
from github.Repository import Repository
import logging
from typing import List
REQUEST_REPO = 'Azure/sdk-release-request'
REST_REPO = 'Azure/azure-rest-api-specs'
AUTO_ASSIGN_LABEL = 'assigned'
AUTO_PARSE_LABEL = 'auto-link'
_LOG = logging.getLogger(__name__)
| 31 | 87 | 0.635236 | from github.Issue import Issue
from github.Repository import Repository
import logging
from typing import List
REQUEST_REPO = 'Azure/sdk-release-request'
REST_REPO = 'Azure/azure-rest-api-specs'
AUTO_ASSIGN_LABEL = 'assigned'
AUTO_PARSE_LABEL = 'auto-link'
_LOG = logging.getLogger(__name__)
def get_origin_link_and_tag(issue_body_list: List[str]) -> (str, str):
link, readme_tag = '', ''
for row in issue_body_list:
if 'link' in row.lower():
link = row.split(":", 1)[-1].strip()
if 'readme tag' in row.lower():
readme_tag = row.split(":", 1)[-1].strip()
if link and readme_tag:
break
if link.count('https') > 1:
link = link.split(']')[0]
link = link.replace('[', "").replace(']', "").replace('(', "").replace(')', "")
return link, readme_tag
class IssuePackage:
issue = None # origin issue instance
rest_repo = None # repo instance: Azure/azure-rest-api-specs
labels_name = {} # name set of issue labels
def __init__(self, issue: Issue, rest_repo: Repository):
self.issue = issue
self.rest_repo = rest_repo
self.labels_name = {label.name for label in issue.labels}
| 685 | 182 | 46 |
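A short, hedged usage sketch for `get_origin_link_and_tag` above; the issue-body lines are invented for illustration and the function is assumed to be imported from this module:

body = [
    "link: https://github.com/Azure/azure-rest-api-specs/pull/12345",
    "Readme Tag: package-2021-03",
]
link, tag = get_origin_link_and_tag(body)
print(link)  # https://github.com/Azure/azure-rest-api-specs/pull/12345
print(tag)   # package-2021-03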
11de465540347c176ba804221d719eaf15627d08 | 2,350 | py | Python | ex9/api_processor.py | Maheliusz/nlp_lab | 49e5c9dfe81d94bac4323e044502d1b73c99ce3c | [
"MIT"
] | null | null | null | ex9/api_processor.py | Maheliusz/nlp_lab | 49e5c9dfe81d94bac4323e044502d1b73c99ce3c | [
"MIT"
] | null | null | null | ex9/api_processor.py | Maheliusz/nlp_lab | 49e5c9dfe81d94bac4323e044502d1b73c99ce3c | [
"MIT"
] | null | null | null | import argparse
import os
import random
import sys
import time
import requests
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, help='Path to text files with bills', required=True)
parser.add_argument('--count', type=int, help='How much files process', required=False, default=20)
args = parser.parse_args()
url = 'http://ws.clarin-pl.eu/nlprest2'
parsed = {}
id2file = {}
already_parsed = os.listdir(args.path + 'ner/')
count = min(args.count, 100 - len(already_parsed))
directory_contents = random.sample(list(filter(lambda entry: os.path.isfile(args.path + entry)
and entry not in already_parsed,
os.listdir(args.path))),
k=count)
for filename in directory_contents:
with open(args.path + filename, encoding='utf-8') as file:
response = requests.post(url=url + '/base/startTask',
json={'text': file.read(), 'lpmn': 'any2txt|wcrft2|liner2({"model":"n82"})',
'user': ''})
response_string = str(response.content).replace("b\'", "").replace("\'", "")
id2file[response_string] = filename
parsed[response_string] = {"value": None, "status": None}
print("{} read and sent".format(filename))
id_list = list(parsed.keys())
print("Finished reading files")
counter = 0
while len(id_list) > 0:
    for id in list(id_list):  # iterate over a snapshot; removing from id_list mid-loop would otherwise skip entries
parsed[id] = requests.get(url=url + '/base/getStatus/' + str(id)).json()
if parsed[id]['status'] == 'DONE':
counter += 1
with open(args.path + 'ner/' + id2file[id], 'wb') as file:
for element in parsed[id]['value']:
# print(requests.get(url=url + '/base/download' + element['fileID']).content)
# file.write(str(requests.get(url=url + '/base/download' + element['fileID']).content)[2:-1])
file.write(requests.get(url=url + '/base/download' + element['fileID']).content)
id_list.remove(id)
print("{} finished".format(counter))
elif parsed[id]['status'] == 'ERROR':
print(parsed[id]['value'], file=sys.stderr)
            sys.exit(-1)  # explicit sys.exit avoids relying on the site-provided exit() builtin
time.sleep(2)
print('{} docs left'.format(len(id_list)))
| 41.22807 | 113 | 0.570638 | import argparse
import os
import random
import sys
import time
import requests
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, help='Path to text files with bills', required=True)
parser.add_argument('--count', type=int, help='How much files process', required=False, default=20)
args = parser.parse_args()
url = 'http://ws.clarin-pl.eu/nlprest2'
parsed = {}
id2file = {}
already_parsed = os.listdir(args.path + 'ner/')
count = min(args.count, 100 - len(already_parsed))
directory_contents = random.sample(list(filter(lambda entry: os.path.isfile(args.path + entry)
and entry not in already_parsed,
os.listdir(args.path))),
k=count)
for filename in directory_contents:
with open(args.path + filename, encoding='utf-8') as file:
response = requests.post(url=url + '/base/startTask',
json={'text': file.read(), 'lpmn': 'any2txt|wcrft2|liner2({"model":"n82"})',
'user': ''})
response_string = str(response.content).replace("b\'", "").replace("\'", "")
id2file[response_string] = filename
parsed[response_string] = {"value": None, "status": None}
print("{} read and sent".format(filename))
id_list = list(parsed.keys())
print("Finished reading files")
counter = 0
while len(id_list) > 0:
    for id in list(id_list):  # iterate over a snapshot; removing from id_list mid-loop would otherwise skip entries
parsed[id] = requests.get(url=url + '/base/getStatus/' + str(id)).json()
if parsed[id]['status'] == 'DONE':
counter += 1
with open(args.path + 'ner/' + id2file[id], 'wb') as file:
for element in parsed[id]['value']:
# print(requests.get(url=url + '/base/download' + element['fileID']).content)
# file.write(str(requests.get(url=url + '/base/download' + element['fileID']).content)[2:-1])
file.write(requests.get(url=url + '/base/download' + element['fileID']).content)
id_list.remove(id)
print("{} finished".format(counter))
elif parsed[id]['status'] == 'ERROR':
print(parsed[id]['value'], file=sys.stderr)
            sys.exit(-1)  # explicit sys.exit avoids relying on the site-provided exit() builtin
time.sleep(2)
print('{} docs left'.format(len(id_list)))
| 0 | 0 | 0 |
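The script above follows a common submit-then-poll pattern against the CLARIN-PL NLP REST service. A minimal, hedged sketch of that polling loop factored into a reusable helper — `fetch_status` stands in for the `requests.get(...).json()` call, and all names here are illustrative:

import time

def wait_until_done(task_ids, fetch_status, poll_seconds=2):
    pending = list(task_ids)
    results = {}
    while pending:
        for task_id in list(pending):  # copy: safe to remove mid-loop
            status = fetch_status(task_id)
            if status['status'] == 'DONE':
                results[task_id] = status['value']
                pending.remove(task_id)
            elif status['status'] == 'ERROR':
                raise RuntimeError(status['value'])
        time.sleep(poll_seconds)
    return results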
5dedd4b58a8257ac092a43187da5519e0e4f4069 | 732 | py | Python | src/routes/v1/faculties.py | university-my/ultimate-schedule-api | 6dbf2368da8751a8b6105c8d783a4b105f99866d | [
"MIT"
] | 5 | 2020-04-18T16:33:50.000Z | 2021-09-30T09:24:56.000Z | src/routes/v1/faculties.py | university-my/ultimate-schedule-api | 6dbf2368da8751a8b6105c8d783a4b105f99866d | [
"MIT"
] | 15 | 2020-04-18T13:03:26.000Z | 2021-12-13T20:44:54.000Z | src/routes/v1/faculties.py | university-my/ultimate-schedule-api | 6dbf2368da8751a8b6105c8d783a4b105f99866d | [
"MIT"
] | 2 | 2020-05-30T20:51:45.000Z | 2021-09-28T10:32:12.000Z | from fastapi import APIRouter
from src.utils.events import Events
from src.schemas.schema import x_schedule_header
from src.controllers.faculties_controller import get_all_faculties, is_faculty_exists
from src.utils.tracking import track
tag = "Faculties"
router = APIRouter()
@router.get("", tags=[tag])
@track(fmt="", event=Events.GET_ALL_FACULTIES)
@router.get("/exists", tags=[tag])
@track(fmt="query={query}", event=Events.IS_FACULTY_EXISTS)
| 34.857143 | 85 | 0.79235 | from fastapi import APIRouter
from src.utils.events import Events
from src.schemas.schema import x_schedule_header
from src.controllers.faculties_controller import get_all_faculties, is_faculty_exists
from src.utils.tracking import track
tag = "Faculties"
router = APIRouter()
@router.get("", tags=[tag])
@track(fmt="", event=Events.GET_ALL_FACULTIES)
async def faculties(*, schedule_url: str = x_schedule_header):
return await get_all_faculties(schedule_url=schedule_url)
@router.get("/exists", tags=[tag])
@track(fmt="query={query}", event=Events.IS_FACULTY_EXISTS)
async def faculty_exists(*, query: str, schedule_url: str = x_schedule_header):
return await is_faculty_exists(schedule_url=schedule_url, query=query)
| 236 | 0 | 44 |
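A hedged sketch of exercising the faculties router above with FastAPI's TestClient. The app wiring, the route prefix, and the schedule header name are assumptions — in the real module the header is defined by `x_schedule_header`:

from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()
app.include_router(router, prefix="/faculties")

client = TestClient(app)
# 'x-schedule-url' is a guess at the header name behind x_schedule_header.
resp = client.get("/faculties/exists",
                  params={"query": "FICT"},
                  headers={"x-schedule-url": "http://example.edu/timetable"})
print(resp.status_code)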
20b200530b1cf1e5a75bb3eada9fd29120296117 | 11,062 | py | Python | ckanext/ksext/controllers/MUser.py | WilJoey/ckanext-ksext | 1f3383d34beb35702d5bf0799defa5398f207ce2 | [
"MIT"
] | null | null | null | ckanext/ksext/controllers/MUser.py | WilJoey/ckanext-ksext | 1f3383d34beb35702d5bf0799defa5398f207ce2 | [
"MIT"
] | null | null | null | ckanext/ksext/controllers/MUser.py | WilJoey/ckanext-ksext | 1f3383d34beb35702d5bf0799defa5398f207ce2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import ckan.plugins as p
#from ckan.lib.base import BaseController, config
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.model as model
import ckan.logic as logic
import ckan.logic.schema as schema
import ckan.new_authz as new_authz
import ckan.lib.captcha as captcha
import ckan.lib.navl.dictization_functions as dictization_functions
import functools
import requests
from sqlalchemy import text
import logging
from pylons import config
from ckan.common import _, c, g, request, response
c = base.c
request = base.request
log = logging.getLogger(__name__)
| 37.120805 | 175 | 0.593835 | # -*- coding: utf-8 -*-
import ckan.plugins as p
#from ckan.lib.base import BaseController, config
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.model as model
import ckan.logic as logic
import ckan.logic.schema as schema
import ckan.new_authz as new_authz
import ckan.lib.captcha as captcha
import ckan.lib.navl.dictization_functions as dictization_functions
import functools
import requests
from sqlalchemy import text
import logging
from pylons import config
from ckan.common import _, c, g, request, response
c = base.c
request = base.request
log = logging.getLogger(__name__)
class MUserController(base.BaseController):
def org_admin_update(self):
result = { "success": False }
user_id = request.POST.get('user', '')
dataset_id = request.POST.get('ds','')
self._update_org_admin(user_id, dataset_id)
result["success"]=True
response.headers['Content-Type'] = 'application/json;charset=utf-8'
return h.json.dumps(result)
def _update_org_admin(self, user_id, dataset_id):
sql = "update package set creator_user_id=%s where id=%s ;"
model.meta.engine.execute(sql, user_id, dataset_id)
model.Session.commit()
def org_admin(self):
org_id = request.GET.get('org', '')
dataset_id = request.GET.get('ds','')
result = {
"org_users" : self._get_org_users(org_id),
"manager" : self._get_dataset_manager(dataset_id)
}
response.headers['Content-Type'] = 'application/json;charset=utf-8'
return h.json.dumps(result)
def _get_org_users(self, org_id):
sql = '''
select u.id, u.fullname as name from member m left join "user" u on u.id=m.table_id where m.group_id=:org_id and m.state='active' and m.table_name='user' and u.state='active';
'''
dt = model.Session.execute(sql, {'org_id': org_id}).fetchall()
result = [dict(row) for row in dt]
return result
def _get_dataset_manager(self, dataset_id):
sql ='''
select u.fullname as name, u.id from package p left join "user" u on p.creator_user_id=u.id where p.id=:dataset_id;
'''
dt = model.Session.execute(sql, {'dataset_id': dataset_id}).fetchall()
if (len(dt) == 0 ) :
return None
else :
return [dict(row) for row in dt]
    def index(self):
LIMIT = 20
page = int(request.params.get('page', 1))
c.q = request.params.get('q', '')
c.order_by = request.params.get('order_by', 'name')
context = {'return_query': True, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'q': c.q,
'limit': LIMIT,
'offset': (page - 1) * LIMIT,
'order_by': c.order_by}
try:
logic.check_access('user_list', context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Not authorized to see this page'))
users_list = logic.get_action('user_list')(context, data_dict)
c.users = users_list
c.page = h.Page(
collection=users_list,
page=page,
url=h.pager_url,
item_count=users_list.count(),
items_per_page=LIMIT
)
return base.render('muser/index.html')
    def new(self, data=None, errors=None, error_summary=None):
#q = model.Session.query(model.User).filter(model.User.sysadmin==True)
#c.sysadmins = [a.name for a in q.all()]
'''GET to display a form for registering a new user.
or POST the form data to actually do the user registration.
'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'auth_user_obj': c.userobj,
'schema': self._new_form_to_db_schema(),
'save': 'save' in request.params}
c.is_sysadmin = new_authz.is_sysadmin(c.user)
if not c.user or not c.is_sysadmin:
return base.render('user/logout_first.html')
try:
logic.check_access('user_create', context)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to create a user'))
if context['save'] and not data:
return self._save_new(context)
c.data = data or {}
c.errors = errors or {}
c.error_summary = error_summary or {}
#vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
#c.form = render(self.new_user_form, extra_vars=vars)
#return render('user/new.html')
return base.render('muser/new.html')
def _new_form_to_db_schema(self):
return schema.user_new_form_schema()
def _save_new(self, context):
try:
data_dict = logic.clean_dict(dictization_functions.unflatten(
logic.tuplize_dict(logic.parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
captcha.check_recaptcha(request)
user = logic.get_action('user_create')(context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to create user %s') % '')
except logic.NotFound, e:
base.abort(404, _('User not found'))
except dictization_functions.DataError:
base.abort(400, _(u'Integrity Error'))
except captcha.CaptchaError:
error_msg = _(u'Bad Captcha. Please try again.')
h.flash_error(error_msg)
return self.new(data_dict)
except logic.ValidationError, e:
c.errors = e.error_dict
c.error_summary = e.error_summary
return self.new(data_dict, c.errors, c.error_summary)
# success
h.flash_success(_('User "%s" is now registered.') % (data_dict['name']))
#return base.render('user/logout_first.html')
return base.render('muser/new.html')
def edit(self, id=None, data=None, errors=None, error_summary=None):
context = {'save': 'save' in request.params,
'schema': self._edit_form_to_db_schema(),
'model': model, 'session': model.Session,
'user': c.user, 'auth_user_obj': c.userobj
}
if id is None:
base.abort(400, _('No user specified'))
if not new_authz.is_sysadmin(c.user):
base.abort(401, _('User %s not authorized to edit %s') % (str(c.user), id))
data_dict = {'id': id}
try:
logic.check_access('user_update', context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit a user.'))
if (context['save']) and not data:
return self._save_edit(id, context)
try:
old_data = logic.get_action('user_show')(context, data_dict)
schema = self._db_to_edit_form_schema()
if schema:
                old_data, errors = dictization_functions.validate(old_data, schema)  # validate lives in ckan.lib.navl.dictization_functions
c.display_name = old_data.get('display_name')
c.user_name = old_data.get('name')
data = data or old_data
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit user %s') % '')
except logic.NotFound:
base.abort(404, _('User not found'))
user_obj = context.get('user_obj')
errors = errors or {}
vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
self._setup_template_variables({'model': model,
'session': model.Session,
'user': c.user or c.author},
data_dict)
log.warn(vars.__repr__())
log.warn('muser edit: 1')
c.is_myself = True
c.show_email_notifications = h.asbool(
config.get('ckan.activity_streams_email_notifications'))
log.warn('muser edit: 2')
c.form = base.render('muser/edit_user_form.html', extra_vars=vars)
log.warn('muser edit: 3')
return base.render('muser/edit.html')
def _save_edit(self, id, context):
try:
data_dict = logic.clean_dict(dictization_functions.unflatten(
logic.tuplize_dict(logic.parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
data_dict['id'] = id
# MOAN: Do I really have to do this here?
if 'activity_streams_email_notifications' not in data_dict:
data_dict['activity_streams_email_notifications'] = False
user = logic.get_action('user_update')(context, data_dict)
h.flash_success(_('Profile updated'))
user_index = h.url_for(controller='ckanext.ksext.controllers.MUser:MUserController', action='index')
h.redirect_to(user_index)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit user %s') % id)
except logic.NotFound, e:
base.abort(404, _('User not found'))
except dictization_functions.DataError:
base.abort(400, _(u'Integrity Error'))
except logic.ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.edit(id, data_dict, errors, error_summary)
def _setup_template_variables(self, context, data_dict):
c.is_sysadmin = new_authz.is_sysadmin(c.user)
try:
user_dict = logic.get_action('user_show')(context, data_dict)
except logic.NotFound:
base.abort(404, _('User not found'))
except logic.NotAuthorized:
base.abort(401, _('Not authorized to see this page'))
c.user_dict = user_dict
c.is_myself = user_dict['name'] == c.user
c.about_formatted = h.render_markdown(user_dict['about'])
def _db_to_edit_form_schema(self):
'''This is an interface to manipulate data from the database
into a format suitable for the form (optional)'''
def _edit_form_to_db_schema(self):
return schema.user_edit_form_schema()
def delete(self, id):
'''Delete user with id passed as parameter'''
context = {'model': model,
'session': model.Session,
'user': c.user,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
logic.get_action('user_delete')(context, data_dict)
h.flash_success(_('User deleted!'))
user_index = h.url_for(controller='ckanext.ksext.controllers.MUser:MUserController', action='index')
h.redirect_to(user_index)
except logic.NotAuthorized:
msg = _('Unauthorized to delete user with id "{user_id}".')
base.abort(401, msg.format(user_id=id))
| 7,830 | 2,600 | 23 |
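The raw queries in `_get_org_users` and `_get_dataset_manager` above rely on SQLAlchemy named bind parameters (`:org_id`, `:dataset_id`) rather than string formatting, which keeps them safe from SQL injection. A self-contained sketch of the same pattern against an in-memory SQLite engine (table and rows invented for illustration):

from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")
with engine.connect() as conn:
    conn.execute(text("CREATE TABLE u (id TEXT, name TEXT)"))
    conn.execute(text("INSERT INTO u VALUES ('1', 'Alice')"))
    rows = conn.execute(text("SELECT id, name FROM u WHERE id = :uid"),
                        {"uid": "1"}).fetchall()
    print([tuple(row) for row in rows])  # [('1', 'Alice')]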
a156f74140169b47890b5b7f16f2a1189fccdb1f | 15,863 | py | Python | pypeit/core/load.py | finagle29/PypeIt | 418d6d24d24054ad590d2f06c0b4688ea18f492e | [
"BSD-3-Clause"
] | null | null | null | pypeit/core/load.py | finagle29/PypeIt | 418d6d24d24054ad590d2f06c0b4688ea18f492e | [
"BSD-3-Clause"
] | null | null | null | pypeit/core/load.py | finagle29/PypeIt | 418d6d24d24054ad590d2f06c0b4688ea18f492e | [
"BSD-3-Clause"
] | null | null | null | """ Module for loading PypeIt files
"""
import os
import warnings
import numpy as np
from astropy import units
from astropy.time import Time
from astropy.io import fits
from astropy.table import Table
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools.spectra.utils import collate
import linetools.utils
from pypeit import msgs
from IPython import embed
from pypeit.core import parse
# TODO I don't think we need this routine
def load_ext_to_array(hdulist, ext_id, ex_value='OPT', flux_value=True, nmaskedge=None):
'''
It will be called by load_1dspec_to_array.
Load one-d spectra from ext_id in the hdulist
Args:
hdulist: FITS HDU list
ext_id: extension name, i.e., 'SPAT1073-SLIT0001-DET03', 'OBJID0001-ORDER0003', 'OBJID0001-ORDER0002-DET01'
ex_value: 'OPT' or 'BOX'
flux_value: if True load fluxed data, else load unfluxed data
Returns:
tuple: Returns wave, flux, ivar, mask
'''
if (ex_value != 'OPT') and (ex_value != 'BOX'):
msgs.error('{:} is not recognized. Please change to either BOX or OPT.'.format(ex_value))
# get the order/slit information
ntrace0 = np.size(hdulist)-1
idx_names = []
for ii in range(ntrace0):
idx_names.append(hdulist[ii+1].name) # idx name
# Initialize ext
ext = None
for indx in (idx_names):
if ext_id in indx:
ext = indx
if ext is None:
msgs.error('Can not find extension {:}.'.format(ext_id))
else:
hdu_iexp = hdulist[ext]
wave = hdu_iexp.data['{:}_WAVE'.format(ex_value)]
mask = hdu_iexp.data['{:}_MASK'.format(ex_value)]
# Mask Edges
if nmaskedge is not None:
mask[:int(nmaskedge)] = False
mask[-int(nmaskedge):] = False
if flux_value:
flux = hdu_iexp.data['{:}_FLAM'.format(ex_value)]
ivar = hdu_iexp.data['{:}_FLAM_IVAR'.format(ex_value)]
else:
msgs.warn('Loading unfluxed spectra')
flux = hdu_iexp.data['{:}_COUNTS'.format(ex_value)]
ivar = hdu_iexp.data['{:}_COUNTS_IVAR'.format(ex_value)]
return wave, flux, ivar, mask
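# --- Hedged usage sketch (not part of the original module) ------------------
# Build a fake one-extension spec1d-style HDU list in memory and run
# load_ext_to_array on it. The extension name and column values are
# illustrative; the column names follow the '{EX}_WAVE' / '{EX}_MASK' /
# '{EX}_COUNTS' pattern the function reads.
def _demo_load_ext_to_array():
    cols = fits.ColDefs([
        fits.Column(name='OPT_WAVE', format='D', array=np.linspace(4000., 5000., 10)),
        fits.Column(name='OPT_MASK', format='L', array=np.ones(10, dtype=bool)),
        fits.Column(name='OPT_COUNTS', format='D', array=np.ones(10)),
        fits.Column(name='OPT_COUNTS_IVAR', format='D', array=np.ones(10)),
    ])
    hdul = fits.HDUList([fits.PrimaryHDU(),
                         fits.BinTableHDU.from_columns(cols, name='SPAT0100-SLIT0001-DET01')])
    return load_ext_to_array(hdul, 'SPAT0100-SLIT0001-DET01', flux_value=False)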
# TODO merge this with unpack orders
def load_1dspec_to_array(fnames, gdobj=None, order=None, ex_value='OPT', flux_value=True, nmaskedge=None):
'''
Load the spectra from the 1d fits file into arrays.
    For Echelle data, pass ``order`` to load a single order, or leave
    ``order=None`` to load all orders (see the shape cases below).
Args:
fnames (list): 1D spectra fits file(s)
gdobj (list): extension name (longslit/multislit) or objID (Echelle)
order (None or int): order number
ex_value (str): 'OPT' or 'BOX'
flux_value (bool): if True it will load fluxed spectra, otherwise load counts
Returns:
tuple: Returns the following:
- waves (ndarray): wavelength array of your spectra, see
below for the shape information of this array.
- fluxes (ndarray): flux array of your spectra
- ivars (ndarray): ivars of your spectra
- masks (ndarray, bool): mask array of your spectra
The shapes of all returns are exactly the same.
- Case 1: np.size(fnames)=np.size(gdobj)=1, order=None for
Longslit or order=N (an int number) for Echelle
Longslit/single order for a single fits file, they are 1D
arrays with the size equal to Nspec
- Case 2: np.size(fnames)=np.size(gdobj)>1, order=None for
Longslit or order=N (an int number) for Echelle
Longslit/single order for a list of fits files, 2D array,
the shapes are Nspec by Nexp
- Case 3: np.size(fnames)=np.size(gdobj)=1, order=None All
Echelle orders for a single fits file, 2D array, the
shapes are Nspec by Norders
- Case 4: np.size(fnames)=np.size(gdobj)>1, order=None All
Echelle orders for a list of fits files, 3D array, the
              shapes are Nspec by Norders by Nexp
'''
# read in the first fits file
if isinstance(fnames, (list, np.ndarray)):
nexp = np.size(fnames)
fname0 = fnames[0]
elif isinstance(fnames, str):
nexp = 1
fname0 = fnames
hdulist = fits.open(fname0)
header = hdulist[0].header
npix = header['NPIX']
pypeline = header['PYPELINE']
# get the order/slit information
ntrace0 = np.size(hdulist)-1
idx_orders = []
for ii in range(ntrace0):
idx_orders.append(int(hdulist[ii+1].name.split('-')[1][5:])) # slit ID or order ID
if pypeline == "Echelle":
## np.unique automatically sort the returned array which is not what I want!!!
## order_vec = np.unique(idx_orders)
dum, order_vec_idx = np.unique(idx_orders, return_index=True)
order_vec = np.array(idx_orders)[np.sort(order_vec_idx)]
norder = np.size(order_vec)
else:
norder = 1
    #TODO This is unnecessarily complicated. The nexp=1 case does the same operations as the nexp > 1 case. Refactor
# this so that it just does the same set of operations once and then reshapes the array at the end to give you what
# you want. Let's merge this with unpack orders
## Loading data from a single fits file
if nexp == 1:
# initialize arrays
if (order is None) and (pypeline == "Echelle"):
waves = np.zeros((npix, norder,nexp))
fluxes = np.zeros_like(waves)
ivars = np.zeros_like(waves)
masks = np.zeros_like(waves, dtype=bool)
for ii, iord in enumerate(order_vec):
ext_id = gdobj[0]+'-ORDER{:04d}'.format(iord)
wave_iord, flux_iord, ivar_iord, mask_iord = load_ext_to_array(hdulist, ext_id, ex_value=ex_value,
flux_value=flux_value, nmaskedge=nmaskedge)
waves[:,ii,0] = wave_iord
fluxes[:,ii,0] = flux_iord
ivars[:,ii,0] = ivar_iord
masks[:,ii,0] = mask_iord
else:
if pypeline == "Echelle":
ext_id = gdobj[0]+'-ORDER{:04d}'.format(order)
else:
ext_id = gdobj[0]
waves, fluxes, ivars, masks = load_ext_to_array(hdulist, ext_id, ex_value=ex_value, flux_value=flux_value,
nmaskedge=nmaskedge)
## Loading data from a list of fits files
else:
# initialize arrays
if (order is None) and (pypeline == "Echelle"):
# store all orders into one single array
waves = np.zeros((npix, norder, nexp))
else:
# store a specific order or longslit
waves = np.zeros((npix, nexp))
fluxes = np.zeros_like(waves)
ivars = np.zeros_like(waves)
masks = np.zeros_like(waves,dtype=bool)
for iexp in range(nexp):
hdulist_iexp = fits.open(fnames[iexp])
            # ToDo: The following part can be removed if all data are reduced using the latest pipeline
if pypeline == "Echelle":
ntrace = np.size(hdulist_iexp) - 1
idx_orders = []
for ii in range(ntrace):
idx_orders.append(int(hdulist_iexp[ii + 1].name.split('-')[1][5:])) # slit ID or order ID
dum, order_vec_idx = np.unique(idx_orders, return_index=True)
order_vec = np.array(idx_orders)[np.sort(order_vec_idx)]
            # ToDo: The above part can be removed if all data are reduced using the latest pipeline
if (order is None) and (pypeline == "Echelle"):
for ii, iord in enumerate(order_vec):
ext_id = gdobj[iexp]+'-ORDER{:04d}'.format(iord)
wave_iord, flux_iord, ivar_iord, mask_iord = load_ext_to_array(hdulist_iexp, ext_id, ex_value=ex_value,
nmaskedge = nmaskedge, flux_value=flux_value)
waves[:,ii,iexp] = wave_iord
fluxes[:,ii,iexp] = flux_iord
ivars[:,ii,iexp] = ivar_iord
masks[:,ii,iexp] = mask_iord
else:
if pypeline == "Echelle":
ext_id = gdobj[iexp]+'-ORDER{:04d}'.format(order)
else:
ext_id = gdobj[iexp]
wave, flux, ivar, mask = load_ext_to_array(hdulist_iexp, ext_id, ex_value=ex_value, flux_value=flux_value,
nmaskedge=nmaskedge)
waves[:, iexp] = wave
fluxes[:, iexp] = flux
ivars[:, iexp] = ivar
masks[:, iexp] = mask
return waves, fluxes, ivars, masks, header
def load_spec_order(fname,norder, objid=None, order=None, extract='OPT', flux=True):
"""
    Load a single-order spectrum from a PypeIt 1D spectrum fits file.
    It will be called by ech_load_spec.
Args:
fname (str) : The file name of your spec1d file
objid (str) : The id of the object you want to load. (default is the first object)
order (int) : which order you want to load (default is None, loading all orders)
extract (str) : 'OPT' or 'BOX'
flux (bool) : default is True, loading fluxed spectra
Returns:
XSpectrum1D: spectrum_out
"""
if objid is None:
objid = 0
if order is None:
msgs.error('Please specify which order you want to load')
# read extension name into a list
primary_header = fits.getheader(fname, 0)
nspec = primary_header['NSPEC']
extnames = [primary_header['EXT0001']] * nspec
for kk in range(nspec):
extnames[kk] = primary_header['EXT' + '{0:04}'.format(kk + 1)]
# Figure out which extension is the required data
extnames_array = np.reshape(np.array(extnames),(norder,int(nspec/norder)))
extnames_good = extnames_array[:,int(objid[3:])-1]
extname = extnames_good[order]
try:
exten = extnames.index(extname) + 1
msgs.info("Loading extension {:s} of spectrum {:s}".format(extname, fname))
except:
msgs.error("Spectrum {:s} does not contain {:s} extension".format(fname, extname))
spectrum = load_1dspec(fname, exten=exten, extract=extract, flux=flux)
# Polish a bit -- Deal with NAN, inf, and *very* large values that will exceed
# the floating point precision of float32 for var which is sig**2 (i.e. 1e38)
bad_flux = np.any([np.isnan(spectrum.flux), np.isinf(spectrum.flux),
np.abs(spectrum.flux) > 1e30,
spectrum.sig ** 2 > 1e10,
], axis=0)
# Sometimes Echelle spectra have zero wavelength
bad_wave = spectrum.wavelength < 1000.0*units.AA
bad_all = bad_flux + bad_wave
## trim bad part
wave_out,flux_out,sig_out = spectrum.wavelength[~bad_all],spectrum.flux[~bad_all],spectrum.sig[~bad_all]
spectrum_out = XSpectrum1D.from_tuple((wave_out,flux_out,sig_out), verbose=False)
#if np.sum(bad_flux):
# msgs.warn("There are some bad flux values in this spectrum. Will zero them out and mask them (not ideal)")
# spectrum.data['flux'][spectrum.select][bad_flux] = 0.
# spectrum.data['sig'][spectrum.select][bad_flux] = 0.
return spectrum_out
def ech_load_spec(files,objid=None,order=None,extract='OPT',flux=True):
"""
Loading Echelle spectra from a list of PypeIt 1D spectrum fits files
Args:
        files (list) : The list of file names of your spec1d files
objid (str) : The id (one per fits file) of the object you want to load. (default is the first object)
order (int) : which order you want to load (default is None, loading all orders)
extract (str) : 'OPT' or 'BOX'
flux (bool) : default is True, loading fluxed spectra
Returns:
XSpectrum1D: spectrum_out
"""
nfiles = len(files)
if objid is None:
objid = ['OBJ0000'] * nfiles
elif len(objid) == 1:
objid = objid * nfiles
elif len(objid) != nfiles:
msgs.error('The length of objid should be either 1 or equal to the number of spectra files.')
fname = files[0]
ext_first = fits.getheader(fname, 1)
ext_final = fits.getheader(fname, -1)
norder = abs(ext_final['ECHORDER'] - ext_first['ECHORDER']) + 1
msgs.info('spectrum {:s} has {:d} orders'.format(fname, norder))
if norder <= 1:
        msgs.error('The number of orders has to be greater than one for echelle. Longslit data?')
# Load spectra
spectra_list = []
for ii, fname in enumerate(files):
if order is None:
            msgs.info('Loading all orders into a giant spectrum')
for iord in range(norder):
spectrum = load_spec_order(fname, norder, objid=objid[ii],order=iord,extract=extract,flux=flux)
# Append
spectra_list.append(spectrum)
elif order >= norder:
            msgs.error('order number cannot be greater than the total number of orders')
else:
spectrum = load_spec_order(fname,norder, objid=objid[ii], order=order, extract=extract, flux=flux)
# Append
spectra_list.append(spectrum)
# Join into one XSpectrum1D object
spectra = collate(spectra_list)
# Return
return spectra
def load_sens_dict(filename):
"""
    Load a sensitivity function (sensfunc) dict from a JSON file.
    Includes converting the JSON lists of particular items into ndarrays.
Args:
filename (str): Master file
Returns:
        dict or None: the sensfunc dict, or None if the file does not exist
"""
# Does the master file exist?
if not os.path.isfile(filename):
msgs.warn("No sensfunc file found with filename {:s}".format(filename))
return None
else:
msgs.info("Loading sensfunc from file {:s}".format(filename))
sens_dict = linetools.utils.loadjson(filename)
# Recast a few items as arrays
for key in sens_dict.keys():
try:
int(key)
except ValueError:
continue
else:
for tkey in sens_dict[key].keys():
if isinstance(sens_dict[key][tkey], list):
sens_dict[key][tkey] = np.array(sens_dict[key][tkey])
return sens_dict
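# --- Hedged usage sketch (not part of the original module) ------------------
# Round-trip for load_sens_dict: write a tiny JSON file with the stdlib
# json module (readable by linetools.utils.loadjson) and load it back.
# The keys and values below are illustrative only.
def _demo_load_sens_dict(path='sens_demo.json'):
    import json
    demo = {'0': {'wave': [1.0, 2.0], 'sensfunc': [0.5, 0.6]}}
    with open(path, 'w') as f:
        json.dump(demo, f)
    sens = load_sens_dict(path)
    return type(sens['0']['wave'])  # numpy.ndarray after the recasting above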
def load_multiext_fits(filename, ext):
"""
Load data and primary header from a multi-extension FITS file
Args:
filename (:obj:`str`):
Name of the file.
ext (:obj:`str`, :obj:`int`, :obj:`list`):
One or more file extensions with data to return. The
extension can be designated by its 0-indexed integer
number or its name.
Returns:
tuple: Returns the image data from each provided extension.
If return_header is true, the primary header is also
returned.
"""
# Format the input and set the tuple for an empty return
_ext = ext if isinstance(ext, list) else [ext]
n_ext = len(_ext)
# Open the file
hdu = fits.open(filename)
head0 = hdu[0].header
# Only one extension
if n_ext == 1:
data = hdu[_ext[0]].data.astype(np.float)
return data, head0
# Multiple extensions
data = tuple([None if hdu[k].data is None else hdu[k].data.astype(np.float) for k in _ext])
# Return
return data+(head0,)
| 38.040767 | 128 | 0.60348 | """ Module for loading PypeIt files
"""
import os
import warnings
import numpy as np
from astropy import units
from astropy.time import Time
from astropy.io import fits
from astropy.table import Table
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools.spectra.utils import collate
import linetools.utils
from pypeit import msgs
from IPython import embed
from pypeit.core import parse
# TODO I don't think we need this routine
def load_ext_to_array(hdulist, ext_id, ex_value='OPT', flux_value=True, nmaskedge=None):
'''
It will be called by load_1dspec_to_array.
Load one-d spectra from ext_id in the hdulist
Args:
hdulist: FITS HDU list
ext_id: extension name, i.e., 'SPAT1073-SLIT0001-DET03', 'OBJID0001-ORDER0003', 'OBJID0001-ORDER0002-DET01'
ex_value: 'OPT' or 'BOX'
flux_value: if True load fluxed data, else load unfluxed data
Returns:
tuple: Returns wave, flux, ivar, mask
'''
if (ex_value != 'OPT') and (ex_value != 'BOX'):
msgs.error('{:} is not recognized. Please change to either BOX or OPT.'.format(ex_value))
# get the order/slit information
ntrace0 = np.size(hdulist)-1
idx_names = []
for ii in range(ntrace0):
idx_names.append(hdulist[ii+1].name) # idx name
# Initialize ext
ext = None
for indx in (idx_names):
if ext_id in indx:
ext = indx
if ext is None:
msgs.error('Can not find extension {:}.'.format(ext_id))
else:
hdu_iexp = hdulist[ext]
wave = hdu_iexp.data['{:}_WAVE'.format(ex_value)]
mask = hdu_iexp.data['{:}_MASK'.format(ex_value)]
# Mask Edges
if nmaskedge is not None:
mask[:int(nmaskedge)] = False
mask[-int(nmaskedge):] = False
if flux_value:
flux = hdu_iexp.data['{:}_FLAM'.format(ex_value)]
ivar = hdu_iexp.data['{:}_FLAM_IVAR'.format(ex_value)]
else:
msgs.warn('Loading unfluxed spectra')
flux = hdu_iexp.data['{:}_COUNTS'.format(ex_value)]
ivar = hdu_iexp.data['{:}_COUNTS_IVAR'.format(ex_value)]
return wave, flux, ivar, mask
# TODO merge this with unpack orders
def load_1dspec_to_array(fnames, gdobj=None, order=None, ex_value='OPT', flux_value=True, nmaskedge=None):
'''
Load the spectra from the 1d fits file into arrays.
    For Echelle data, pass ``order`` to load a single order, or leave
    ``order=None`` to load all orders (see the shape cases below).
Args:
fnames (list): 1D spectra fits file(s)
gdobj (list): extension name (longslit/multislit) or objID (Echelle)
order (None or int): order number
ex_value (str): 'OPT' or 'BOX'
flux_value (bool): if True it will load fluxed spectra, otherwise load counts
Returns:
tuple: Returns the following:
- waves (ndarray): wavelength array of your spectra, see
below for the shape information of this array.
- fluxes (ndarray): flux array of your spectra
- ivars (ndarray): ivars of your spectra
- masks (ndarray, bool): mask array of your spectra
The shapes of all returns are exactly the same.
- Case 1: np.size(fnames)=np.size(gdobj)=1, order=None for
Longslit or order=N (an int number) for Echelle
Longslit/single order for a single fits file, they are 1D
arrays with the size equal to Nspec
- Case 2: np.size(fnames)=np.size(gdobj)>1, order=None for
Longslit or order=N (an int number) for Echelle
Longslit/single order for a list of fits files, 2D array,
the shapes are Nspec by Nexp
- Case 3: np.size(fnames)=np.size(gdobj)=1, order=None All
Echelle orders for a single fits file, 2D array, the
shapes are Nspec by Norders
- Case 4: np.size(fnames)=np.size(gdobj)>1, order=None All
Echelle orders for a list of fits files, 3D array, the
              shapes are Nspec by Norders by Nexp
'''
# read in the first fits file
if isinstance(fnames, (list, np.ndarray)):
nexp = np.size(fnames)
fname0 = fnames[0]
elif isinstance(fnames, str):
nexp = 1
fname0 = fnames
hdulist = fits.open(fname0)
header = hdulist[0].header
npix = header['NPIX']
pypeline = header['PYPELINE']
# get the order/slit information
ntrace0 = np.size(hdulist)-1
idx_orders = []
for ii in range(ntrace0):
idx_orders.append(int(hdulist[ii+1].name.split('-')[1][5:])) # slit ID or order ID
if pypeline == "Echelle":
## np.unique automatically sort the returned array which is not what I want!!!
## order_vec = np.unique(idx_orders)
dum, order_vec_idx = np.unique(idx_orders, return_index=True)
order_vec = np.array(idx_orders)[np.sort(order_vec_idx)]
norder = np.size(order_vec)
else:
norder = 1
    #TODO This is unnecessarily complicated. The nexp=1 case does the same operations as the nexp > 1 case. Refactor
# this so that it just does the same set of operations once and then reshapes the array at the end to give you what
# you want. Let's merge this with unpack orders
## Loading data from a single fits file
if nexp == 1:
# initialize arrays
if (order is None) and (pypeline == "Echelle"):
waves = np.zeros((npix, norder,nexp))
fluxes = np.zeros_like(waves)
ivars = np.zeros_like(waves)
masks = np.zeros_like(waves, dtype=bool)
for ii, iord in enumerate(order_vec):
ext_id = gdobj[0]+'-ORDER{:04d}'.format(iord)
wave_iord, flux_iord, ivar_iord, mask_iord = load_ext_to_array(hdulist, ext_id, ex_value=ex_value,
flux_value=flux_value, nmaskedge=nmaskedge)
waves[:,ii,0] = wave_iord
fluxes[:,ii,0] = flux_iord
ivars[:,ii,0] = ivar_iord
masks[:,ii,0] = mask_iord
else:
if pypeline == "Echelle":
ext_id = gdobj[0]+'-ORDER{:04d}'.format(order)
else:
ext_id = gdobj[0]
waves, fluxes, ivars, masks = load_ext_to_array(hdulist, ext_id, ex_value=ex_value, flux_value=flux_value,
nmaskedge=nmaskedge)
## Loading data from a list of fits files
else:
# initialize arrays
if (order is None) and (pypeline == "Echelle"):
# store all orders into one single array
waves = np.zeros((npix, norder, nexp))
else:
# store a specific order or longslit
waves = np.zeros((npix, nexp))
fluxes = np.zeros_like(waves)
ivars = np.zeros_like(waves)
masks = np.zeros_like(waves,dtype=bool)
for iexp in range(nexp):
hdulist_iexp = fits.open(fnames[iexp])
            # ToDo: The following part can be removed if all data are reduced using the latest pipeline
if pypeline == "Echelle":
ntrace = np.size(hdulist_iexp) - 1
idx_orders = []
for ii in range(ntrace):
idx_orders.append(int(hdulist_iexp[ii + 1].name.split('-')[1][5:])) # slit ID or order ID
dum, order_vec_idx = np.unique(idx_orders, return_index=True)
order_vec = np.array(idx_orders)[np.sort(order_vec_idx)]
            # ToDo: The above part can be removed if all data are reduced using the latest pipeline
if (order is None) and (pypeline == "Echelle"):
for ii, iord in enumerate(order_vec):
ext_id = gdobj[iexp]+'-ORDER{:04d}'.format(iord)
wave_iord, flux_iord, ivar_iord, mask_iord = load_ext_to_array(hdulist_iexp, ext_id, ex_value=ex_value,
nmaskedge = nmaskedge, flux_value=flux_value)
waves[:,ii,iexp] = wave_iord
fluxes[:,ii,iexp] = flux_iord
ivars[:,ii,iexp] = ivar_iord
masks[:,ii,iexp] = mask_iord
else:
if pypeline == "Echelle":
ext_id = gdobj[iexp]+'-ORDER{:04d}'.format(order)
else:
ext_id = gdobj[iexp]
wave, flux, ivar, mask = load_ext_to_array(hdulist_iexp, ext_id, ex_value=ex_value, flux_value=flux_value,
nmaskedge=nmaskedge)
waves[:, iexp] = wave
fluxes[:, iexp] = flux
ivars[:, iexp] = ivar
masks[:, iexp] = mask
return waves, fluxes, ivars, masks, header
def load_spec_order(fname,norder, objid=None, order=None, extract='OPT', flux=True):
"""
    Load a single-order spectrum from a PypeIt 1D spectrum fits file.
    It will be called by ech_load_spec.
Args:
fname (str) : The file name of your spec1d file
objid (str) : The id of the object you want to load. (default is the first object)
order (int) : which order you want to load (default is None, loading all orders)
extract (str) : 'OPT' or 'BOX'
flux (bool) : default is True, loading fluxed spectra
Returns:
XSpectrum1D: spectrum_out
"""
if objid is None:
objid = 0
if order is None:
msgs.error('Please specify which order you want to load')
# read extension name into a list
primary_header = fits.getheader(fname, 0)
nspec = primary_header['NSPEC']
extnames = [primary_header['EXT0001']] * nspec
for kk in range(nspec):
extnames[kk] = primary_header['EXT' + '{0:04}'.format(kk + 1)]
# Figure out which extension is the required data
extnames_array = np.reshape(np.array(extnames),(norder,int(nspec/norder)))
extnames_good = extnames_array[:,int(objid[3:])-1]
extname = extnames_good[order]
try:
exten = extnames.index(extname) + 1
msgs.info("Loading extension {:s} of spectrum {:s}".format(extname, fname))
except:
msgs.error("Spectrum {:s} does not contain {:s} extension".format(fname, extname))
spectrum = load_1dspec(fname, exten=exten, extract=extract, flux=flux)
# Polish a bit -- Deal with NAN, inf, and *very* large values that will exceed
# the floating point precision of float32 for var which is sig**2 (i.e. 1e38)
bad_flux = np.any([np.isnan(spectrum.flux), np.isinf(spectrum.flux),
np.abs(spectrum.flux) > 1e30,
spectrum.sig ** 2 > 1e10,
], axis=0)
# Sometimes Echelle spectra have zero wavelength
bad_wave = spectrum.wavelength < 1000.0*units.AA
bad_all = bad_flux + bad_wave
## trim bad part
wave_out,flux_out,sig_out = spectrum.wavelength[~bad_all],spectrum.flux[~bad_all],spectrum.sig[~bad_all]
spectrum_out = XSpectrum1D.from_tuple((wave_out,flux_out,sig_out), verbose=False)
#if np.sum(bad_flux):
# msgs.warn("There are some bad flux values in this spectrum. Will zero them out and mask them (not ideal)")
# spectrum.data['flux'][spectrum.select][bad_flux] = 0.
# spectrum.data['sig'][spectrum.select][bad_flux] = 0.
return spectrum_out
def ech_load_spec(files,objid=None,order=None,extract='OPT',flux=True):
"""
Loading Echelle spectra from a list of PypeIt 1D spectrum fits files
Args:
        files (list) : The list of file names of your spec1d files
objid (str) : The id (one per fits file) of the object you want to load. (default is the first object)
order (int) : which order you want to load (default is None, loading all orders)
extract (str) : 'OPT' or 'BOX'
flux (bool) : default is True, loading fluxed spectra
Returns:
XSpectrum1D: spectrum_out
"""
nfiles = len(files)
if objid is None:
objid = ['OBJ0000'] * nfiles
elif len(objid) == 1:
objid = objid * nfiles
elif len(objid) != nfiles:
msgs.error('The length of objid should be either 1 or equal to the number of spectra files.')
fname = files[0]
ext_first = fits.getheader(fname, 1)
ext_final = fits.getheader(fname, -1)
norder = abs(ext_final['ECHORDER'] - ext_first['ECHORDER']) + 1
msgs.info('spectrum {:s} has {:d} orders'.format(fname, norder))
if norder <= 1:
        msgs.error('The number of orders has to be greater than one for echelle. Longslit data?')
# Load spectra
spectra_list = []
for ii, fname in enumerate(files):
if order is None:
            msgs.info('Loading all orders into a giant spectrum')
for iord in range(norder):
spectrum = load_spec_order(fname, norder, objid=objid[ii],order=iord,extract=extract,flux=flux)
# Append
spectra_list.append(spectrum)
elif order >= norder:
            msgs.error('order number cannot be greater than the total number of orders')
else:
spectrum = load_spec_order(fname,norder, objid=objid[ii], order=order, extract=extract, flux=flux)
# Append
spectra_list.append(spectrum)
# Join into one XSpectrum1D object
spectra = collate(spectra_list)
# Return
return spectra
def load_sens_dict(filename):
"""
    Load a sensitivity function (sensfunc) dict from a JSON file.
    Includes converting the JSON lists of particular items into ndarrays.
Args:
filename (str): Master file
Returns:
        dict or None: the sensfunc dict, or None if the file does not exist
"""
# Does the master file exist?
if not os.path.isfile(filename):
msgs.warn("No sensfunc file found with filename {:s}".format(filename))
return None
else:
msgs.info("Loading sensfunc from file {:s}".format(filename))
sens_dict = linetools.utils.loadjson(filename)
# Recast a few items as arrays
for key in sens_dict.keys():
try:
int(key)
except ValueError:
continue
else:
for tkey in sens_dict[key].keys():
if isinstance(sens_dict[key][tkey], list):
sens_dict[key][tkey] = np.array(sens_dict[key][tkey])
return sens_dict
def waveids(fname):
    """Load the fitted arc-line pixel arrays from each extension of a FITS file."""
    infile = fits.open(fname)
    pixels = []
    msgs.info("Loading fitted arc lines")
    try:
        o = 1
        while True:
            pixels.append(infile[o].data.astype(np.float))
            o += 1
    except IndexError:
        # Walked past the last extension; everything available has been read.
        pass
    return pixels
def load_multiext_fits(filename, ext):
"""
Load data and primary header from a multi-extension FITS file
Args:
filename (:obj:`str`):
Name of the file.
ext (:obj:`str`, :obj:`int`, :obj:`list`):
One or more file extensions with data to return. The
extension can be designated by its 0-indexed integer
number or its name.
Returns:
tuple: Returns the image data from each provided extension.
If return_header is true, the primary header is also
returned.
"""
# Format the input and set the tuple for an empty return
_ext = ext if isinstance(ext, list) else [ext]
n_ext = len(_ext)
# Open the file
hdu = fits.open(filename)
head0 = hdu[0].header
# Only one extension
if n_ext == 1:
data = hdu[_ext[0]].data.astype(np.float)
return data, head0
# Multiple extensions
data = tuple([None if hdu[k].data is None else hdu[k].data.astype(np.float) for k in _ext])
# Return
return data+(head0,)
| 246 | 0 | 23 |
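To close, a minimal, hedged end-to-end sketch for the `load_multiext_fits` helper above: build a small multi-extension FITS file with astropy, write it to a temporary path, and read one extension back. Note that the helper casts with `np.float`, so this assumes a NumPy version (< 1.24) in which that alias still exists.

import os
import tempfile
import numpy as np
from astropy.io import fits

hdul = fits.HDUList([fits.PrimaryHDU(),
                     fits.ImageHDU(np.arange(6.).reshape(2, 3), name='SCI')])
path = os.path.join(tempfile.mkdtemp(), 'demo.fits')
hdul.writeto(path)

data, header = load_multiext_fits(path, 'SCI')
print(data.shape)  # (2, 3)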